mirror of
https://github.com/VictoriaMetrics/VictoriaMetrics.git
synced 2024-11-21 14:44:00 +00:00
vendor: make vendor-update
This commit is contained in:
parent
5dc0bf6d3d
commit
423825695f
270 changed files with 4719 additions and 47224 deletions
16
go.mod
16
go.mod
|
@ -1,7 +1,7 @@
|
||||||
module github.com/VictoriaMetrics/VictoriaMetrics
|
module github.com/VictoriaMetrics/VictoriaMetrics
|
||||||
|
|
||||||
require (
|
require (
|
||||||
cloud.google.com/go v0.58.0 // indirect
|
cloud.google.com/go v0.59.0 // indirect
|
||||||
cloud.google.com/go/storage v1.10.0
|
cloud.google.com/go/storage v1.10.0
|
||||||
github.com/VictoriaMetrics/fastcache v1.5.7
|
github.com/VictoriaMetrics/fastcache v1.5.7
|
||||||
|
|
||||||
|
@ -10,23 +10,25 @@ require (
|
||||||
github.com/VictoriaMetrics/fasthttp v1.0.1
|
github.com/VictoriaMetrics/fasthttp v1.0.1
|
||||||
github.com/VictoriaMetrics/metrics v1.11.3
|
github.com/VictoriaMetrics/metrics v1.11.3
|
||||||
github.com/VictoriaMetrics/metricsql v0.2.3
|
github.com/VictoriaMetrics/metricsql v0.2.3
|
||||||
github.com/aws/aws-sdk-go v1.32.5
|
github.com/aws/aws-sdk-go v1.32.10
|
||||||
github.com/cespare/xxhash/v2 v2.1.1
|
github.com/cespare/xxhash/v2 v2.1.1
|
||||||
github.com/golang/snappy v0.0.1
|
github.com/golang/snappy v0.0.1
|
||||||
github.com/klauspost/compress v1.10.10
|
github.com/klauspost/compress v1.10.10
|
||||||
github.com/valyala/fastjson v1.5.2
|
github.com/valyala/fastjson v1.5.3
|
||||||
github.com/valyala/fastrand v1.0.0
|
github.com/valyala/fastrand v1.0.0
|
||||||
github.com/valyala/gozstd v1.7.0
|
github.com/valyala/gozstd v1.7.0
|
||||||
github.com/valyala/histogram v1.0.1
|
github.com/valyala/histogram v1.0.1
|
||||||
github.com/valyala/quicktemplate v1.5.0
|
github.com/valyala/quicktemplate v1.5.0
|
||||||
go.opencensus.io v0.22.4 // indirect
|
go.opencensus.io v0.22.4 // indirect
|
||||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 // indirect
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344 // indirect
|
||||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
|
||||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1
|
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4
|
||||||
golang.org/x/text v0.3.3 // indirect
|
golang.org/x/text v0.3.3 // indirect
|
||||||
golang.org/x/tools v0.0.0-20200618155944-c7475b9d7fb2 // indirect
|
golang.org/x/tools v0.0.0-20200625195345-7480c7b4547d // indirect
|
||||||
google.golang.org/api v0.28.0
|
google.golang.org/api v0.28.0
|
||||||
google.golang.org/genproto v0.0.0-20200618215128-cebbd7a98dcc // indirect
|
google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad // indirect
|
||||||
|
google.golang.org/grpc v1.30.0 // indirect
|
||||||
|
google.golang.org/protobuf v1.25.0 // indirect
|
||||||
gopkg.in/yaml.v2 v2.3.0
|
gopkg.in/yaml.v2 v2.3.0
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
41
go.sum
41
go.sum
|
@ -13,8 +13,8 @@ cloud.google.com/go v0.56.0 h1:WRz29PgAsVEyPSDHyk+0fpEkwEFyfhHn+JbksT6gIL4=
|
||||||
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
|
||||||
cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms=
|
cloud.google.com/go v0.57.0 h1:EpMNVUorLiZIELdMZbCYX/ByTFCdoYopYAGxaGVz9ms=
|
||||||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||||
cloud.google.com/go v0.58.0 h1:vtAfVc723K3xKq1BQydk/FyCldnaNFhGhpJxaJzgRMQ=
|
cloud.google.com/go v0.59.0 h1:BM3svUDU3itpc2m5cu5wCyThIYNDlFlts9GASw31GW8=
|
||||||
cloud.google.com/go v0.58.0/go.mod h1:W+9FnSUw6nhVwXlFcp1eL+krq5+HQUJeUogSeJZZiWg=
|
cloud.google.com/go v0.59.0/go.mod h1:qJxNOVCRTxHfwLhvDxxSI9vQc1zI59b9pEglp1Iv60E=
|
||||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||||
cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU=
|
cloud.google.com/go/bigquery v1.4.0 h1:xE3CPsOgttP4ACBePh79zTKALtXwn/Edhcr16R5hMWU=
|
||||||
|
@ -57,8 +57,8 @@ github.com/VictoriaMetrics/metricsql v0.2.3 h1:xGscDmLoeIV7+8qX/mdHnOY0vu4m+wHIV
|
||||||
github.com/VictoriaMetrics/metricsql v0.2.3/go.mod h1:UIjd9S0W1UnTWlJdM0wLS+2pfuPqjwqKoK8yTos+WyE=
|
github.com/VictoriaMetrics/metricsql v0.2.3/go.mod h1:UIjd9S0W1UnTWlJdM0wLS+2pfuPqjwqKoK8yTos+WyE=
|
||||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156 h1:eMwmnE/GDgah4HI848JfFxHt+iPb26b4zyfspmqY0/8=
|
||||||
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
|
||||||
github.com/aws/aws-sdk-go v1.32.5 h1:Sz0C7deIoMu5lFGTVkIN92IEZrUz1AWIDDW+9p6n1Rk=
|
github.com/aws/aws-sdk-go v1.32.10 h1:cEJTxGcBGlsM2tN36MZQKhlK93O9HrnaRs+lq2f0zN8=
|
||||||
github.com/aws/aws-sdk-go v1.32.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
github.com/aws/aws-sdk-go v1.32.10/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
|
||||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||||
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
|
||||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||||
|
@ -116,6 +116,8 @@ github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
|
github.com/google/go-cmp v0.4.1 h1:/exdXoGamhu5ONeUJH0deniYLWYvQwW66yvlfiiKTu0=
|
||||||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
|
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
|
||||||
|
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
|
||||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||||
|
@ -161,8 +163,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5
|
||||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||||
github.com/valyala/fasthttp v1.12.0/go.mod h1:229t1eWu9UXTPmoUkbpN/fctKPBY4IJoFXQnxHGXy6E=
|
github.com/valyala/fasthttp v1.12.0/go.mod h1:229t1eWu9UXTPmoUkbpN/fctKPBY4IJoFXQnxHGXy6E=
|
||||||
github.com/valyala/fastjson v1.5.2 h1:VTbMfG0sCyXqC66PS+ME1cHXy5ClW085avDoy28t4Uo=
|
github.com/valyala/fastjson v1.5.3 h1:z4Z1Bll4WaXo+FXJoiCdW8ss7sKY2d/jYfE2ZzoT284=
|
||||||
github.com/valyala/fastjson v1.5.2/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
github.com/valyala/fastjson v1.5.3/go.mod h1:CLCAqky6SMuOcxStkYQvblddUtoRxhYMGLrsQns1aXY=
|
||||||
github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=
|
github.com/valyala/fastrand v1.0.0 h1:LUKT9aKer2dVQNUi3waewTbKV+7H17kvWFNKs2ObdkI=
|
||||||
github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
|
github.com/valyala/fastrand v1.0.0/go.mod h1:HWqCzkrkg6QXT8V2EXWvXCoow7vLwOFN002oeRzjapQ=
|
||||||
github.com/valyala/gozstd v1.7.0 h1:Ljh5c9zboqLhwTI33al32R72iCZfn0mCbVGcFWbGwRQ=
|
github.com/valyala/gozstd v1.7.0 h1:Ljh5c9zboqLhwTI33al32R72iCZfn0mCbVGcFWbGwRQ=
|
||||||
|
@ -185,6 +187,7 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk
|
||||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||||
|
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||||
|
@ -243,8 +246,8 @@ golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/
|
||||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE=
|
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2 h1:eDrdRpKgkcCqKZQwyZRyeFZgfqt37SL7Kv3tok06cKE=
|
||||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9 h1:pNX+40auqi2JqRfOP1akLGtYcn15TUbkhwuCO3foqqM=
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
|
||||||
golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||||
|
@ -285,8 +288,8 @@ golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o=
|
golang.org/x/sys v0.0.0-20200523222454-059865788121 h1:rITEj+UZHYC927n8GT97eC3zrpzXdb/voyeOuVKS46o=
|
||||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1 h1:ogLJMz+qpzav7lGMh10LMvAkM/fAoGlaiiHYiFYdm80=
|
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4 h1:5/PjkGUjvEU5Gl6BxmvKRPpqo2uNMv4rcHBMwzk/st8=
|
||||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||||
|
@ -333,10 +336,10 @@ golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWc
|
||||||
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
|
||||||
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20200618155944-c7475b9d7fb2 h1:5uj2ldj1KKGFXJexeG65UpexxnZYU4d1Q6en50MewmU=
|
golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20200618155944-c7475b9d7fb2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200625195345-7480c7b4547d h1:V1BGE5ZHrUIYZYNEm0i7jrPwSo3ks0HSn1TrartSqME=
|
||||||
|
golang.org/x/tools v0.0.0-20200625195345-7480c7b4547d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||||
|
@ -355,8 +358,6 @@ google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/
|
||||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ=
|
google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ=
|
||||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
google.golang.org/api v0.26.0 h1:VJZ8h6E8ip82FRpQl848c5vAadxlTXrUh8RzQzSRm08=
|
|
||||||
google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
|
||||||
google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM=
|
google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM=
|
||||||
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
|
@ -393,10 +394,10 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380 h1:xriR1EgvKfkKxIo
|
||||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
|
||||||
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
||||||
google.golang.org/genproto v0.0.0-20200618215128-cebbd7a98dcc h1:Xrg3/DpLwfPwEJr6eXoNtM30WUfACuiYhc14vYB4ea0=
|
google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/genproto v0.0.0-20200618215128-cebbd7a98dcc/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad h1:uAwc13+y0Y8QZLTYhLCu6lHhnG99ecQU5FYTj8zxAng=
|
||||||
|
google.golang.org/genproto v0.0.0-20200624020401-64a14ca9d1ad/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||||
|
@ -408,6 +409,8 @@ google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8
|
||||||
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60=
|
||||||
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
|
google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4=
|
||||||
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
|
||||||
|
google.golang.org/grpc v1.30.0 h1:M5a8xTlYTxwMn5ZFkwhRabsygDY5G8TYLyQDBxJNAxE=
|
||||||
|
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
|
@ -421,6 +424,8 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2
|
||||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||||
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA=
|
||||||
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
|
||||||
|
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||||
|
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
|
|
26
vendor/cloud.google.com/go/CHANGES.md
generated
vendored
26
vendor/cloud.google.com/go/CHANGES.md
generated
vendored
|
@ -1,5 +1,31 @@
|
||||||
# Changes
|
# Changes
|
||||||
|
|
||||||
|
## v0.59.0
|
||||||
|
|
||||||
|
### Announcements
|
||||||
|
|
||||||
|
goolgeapis/google-cloud-go has moved its source of truth to GitHub and is no longer a mirror. This means that our
|
||||||
|
contributing process has changed a bit. We will now be conducting all code reviews on GitHub which means we now accept
|
||||||
|
pull requests! If you have a version of the codebase previously checked out you may wish to update your git remote to
|
||||||
|
point to GitHub.
|
||||||
|
|
||||||
|
### Changes
|
||||||
|
|
||||||
|
- all:
|
||||||
|
- Remove dependency on honnef.co/go/tools.
|
||||||
|
- Update our contributing instructions now that we use GitHub for reviews.
|
||||||
|
- Remove some un-inclusive terminology.
|
||||||
|
- compute/metadata:
|
||||||
|
- Pass cancelable context to DNS lookup.
|
||||||
|
- .github:
|
||||||
|
- Update templates issue/PR templates.
|
||||||
|
- internal:
|
||||||
|
- Bump several clients to GA.
|
||||||
|
- Fix GoDoc badge source.
|
||||||
|
- Several automation changes related to the move to GitHub.
|
||||||
|
- Start generating a client for asset v1p5beta1.
|
||||||
|
- Various updates to autogenerated clients.
|
||||||
|
|
||||||
## v0.58.0
|
## v0.58.0
|
||||||
|
|
||||||
### Deprecation notice
|
### Deprecation notice
|
||||||
|
|
94
vendor/cloud.google.com/go/CONTRIBUTING.md
generated
vendored
94
vendor/cloud.google.com/go/CONTRIBUTING.md
generated
vendored
|
@ -14,94 +14,34 @@
|
||||||
1. Sign one of the
|
1. Sign one of the
|
||||||
[contributor license agreements](#contributor-license-agreements) below.
|
[contributor license agreements](#contributor-license-agreements) below.
|
||||||
|
|
||||||
1. Run `go get golang.org/x/review/git-codereview && go install golang.org/x/review/git-codereview`
|
1. Clone the repo:
|
||||||
to install the code reviewing tool.
|
`git clone https://github.com/googleapis/google-cloud-go`
|
||||||
|
|
||||||
1. Ensure it's working by running `git codereview` (check your `PATH` if
|
1. Change into the checked out source:
|
||||||
not).
|
`cd google-cloud-go`
|
||||||
|
|
||||||
1. If you would like, you may want to set up aliases for `git-codereview`,
|
1. Fork the repo.
|
||||||
such that `git codereview change` becomes `git change`. See the
|
|
||||||
[godoc](https://pkg.go.dev/golang.org/x/review/git-codereview) for details.
|
|
||||||
|
|
||||||
* Should you run into issues with the `git-codereview` tool, please note
|
1. Set your fork as a remote:
|
||||||
that all error messages will assume that you have set up these aliases.
|
`git remote add fork git@github.com:GITHUB_USERNAME/google-cloud-go.git`
|
||||||
|
|
||||||
1. Change to a directory of your choosing and clone the repo.
|
1. Make changes (see [Formatting](#formatting) and [Style](#style)), commit to
|
||||||
|
your fork.
|
||||||
|
|
||||||
|
Commit messages should follow the
|
||||||
|
[Go project style](https://github.com/golang/go/wiki/CommitMessage). For example:
|
||||||
```
|
```
|
||||||
cd ~/code
|
functions: add gophers codelab
|
||||||
git clone https://code.googlesource.com/gocloud
|
|
||||||
```
|
```
|
||||||
|
|
||||||
* If you have already checked out the source, make sure that the remote
|
1. Send a pull request with your changes.
|
||||||
`git` `origin` is https://code.googlesource.com/gocloud:
|
|
||||||
|
|
||||||
```
|
1. A maintainer will review the pull request and make comments.
|
||||||
git remote -v
|
|
||||||
# ...
|
|
||||||
git remote set-url origin https://code.googlesource.com/gocloud
|
|
||||||
```
|
|
||||||
|
|
||||||
* The project uses [Go Modules](https://blog.golang.org/using-go-modules)
|
Prefer adding additional commits over amending and force-pushing since it can
|
||||||
for dependency management See
|
be difficult to follow code reviews when the commit history changes.
|
||||||
[`gopls`](https://github.com/golang/go/wiki/gopls) for making your editor
|
|
||||||
work with modules.
|
|
||||||
|
|
||||||
1. Change to the project directory:
|
|
||||||
|
|
||||||
```
|
|
||||||
cd ~/code/gocloud
|
|
||||||
```
|
|
||||||
|
|
||||||
1. Make sure your `git` auth is configured correctly by visiting
|
|
||||||
https://code.googlesource.com, clicking "Generate Password" at the top-right,
|
|
||||||
and following the directions. Otherwise, `git codereview mail` in the next step
|
|
||||||
will fail.
|
|
||||||
|
|
||||||
1. Now you are ready to make changes. Don't create a new branch or make commits in the traditional
|
|
||||||
way. Use the following`git codereview` commands to create a commit and create a Gerrit CL:
|
|
||||||
|
|
||||||
```
|
|
||||||
git codereview change <branch-name> # Use this instead of git checkout -b <branch-name>
|
|
||||||
# Make changes.
|
|
||||||
git add ...
|
|
||||||
git codereview change # Use this instead of git commit
|
|
||||||
git codereview mail # If this fails, the error message will contain instructions to fix it.
|
|
||||||
```
|
|
||||||
|
|
||||||
* This will create a new `git` branch for you to develop on. Once your
|
|
||||||
change is merged, you can delete this branch.
|
|
||||||
|
|
||||||
1. As you make changes for code review, ammend the commit and re-mail the
|
|
||||||
change:
|
|
||||||
|
|
||||||
```
|
|
||||||
# Make more changes.
|
|
||||||
git add ...
|
|
||||||
git codereview change
|
|
||||||
git codereview mail
|
|
||||||
```
|
|
||||||
|
|
||||||
* **Warning**: do not change the `Change-Id` at the bottom of the commit
|
|
||||||
message - it's how Gerrit knows which change this is (or if it's new).
|
|
||||||
|
|
||||||
* When you fixes issues from code review, respond to each code review
|
|
||||||
message then click **Reply** at the top of the page.
|
|
||||||
|
|
||||||
* Each new mailed amendment will create a new patch set for
|
|
||||||
your change in Gerrit. Patch sets can be compared and reviewed.
|
|
||||||
|
|
||||||
* **Note**: if your change includes a breaking change, our breaking change
|
|
||||||
detector will cause CI/CD to fail. If your breaking change is acceptable
|
|
||||||
in some way, add a `BREAKING_CHANGE_ACCEPTABLE=<reason>` line to the commit
|
|
||||||
message to cause the detector not to be run and to make it clear why that is
|
|
||||||
acceptable.
|
|
||||||
|
|
||||||
1. Finally, add reviewers to your CL when it's ready for review. Reviewers will
|
|
||||||
not be added automatically. If you're not sure who to add for your code review,
|
|
||||||
add tbp@, cbro@, and codyoss@.
|
|
||||||
|
|
||||||
|
Commits will be squashed when they're merged.
|
||||||
|
|
||||||
## Integration Tests
|
## Integration Tests
|
||||||
|
|
||||||
|
|
5
vendor/cloud.google.com/go/README.md
generated
vendored
5
vendor/cloud.google.com/go/README.md
generated
vendored
|
@ -1,6 +1,6 @@
|
||||||
# Google Cloud Client Libraries for Go
|
# Google Cloud Client Libraries for Go
|
||||||
|
|
||||||
[![GoDoc](https://pkg.go.dev/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go)
|
[![GoDoc](https://godoc.org/cloud.google.com/go?status.svg)](https://pkg.go.dev/cloud.google.com/go)
|
||||||
|
|
||||||
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
|
Go packages for [Google Cloud Platform](https://cloud.google.com) services.
|
||||||
|
|
||||||
|
@ -127,8 +127,7 @@ client, err := storage.NewClient(ctx, option.WithTokenSource(tokenSource))
|
||||||
|
|
||||||
Contributions are welcome. Please, see the
|
Contributions are welcome. Please, see the
|
||||||
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
|
[CONTRIBUTING](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/CONTRIBUTING.md)
|
||||||
document for details. We're using Gerrit for our code reviews. Please don't open pull
|
document for details.
|
||||||
requests against this repo, new pull requests will be automatically closed.
|
|
||||||
|
|
||||||
Please note that this project is released with a Contributor Code of Conduct.
|
Please note that this project is released with a Contributor Code of Conduct.
|
||||||
By participating in this project you agree to abide by its terms.
|
By participating in this project you agree to abide by its terms.
|
||||||
|
|
73
vendor/cloud.google.com/go/RELEASING.md
generated
vendored
73
vendor/cloud.google.com/go/RELEASING.md
generated
vendored
|
@ -10,51 +10,14 @@
|
||||||
1. Sign one of the
|
1. Sign one of the
|
||||||
[contributor license agreements](#contributor-license-agreements) below.
|
[contributor license agreements](#contributor-license-agreements) below.
|
||||||
|
|
||||||
1. Run `go get golang.org/x/review/git-codereview && go install golang.org/x/review/git-codereview`
|
1. Clone the repo:
|
||||||
to install the code reviewing tool.
|
`git clone https://github.com/googleapis/google-cloud-go`
|
||||||
|
|
||||||
1. Ensure it's working by running `git codereview` (check your `PATH` if
|
1. Change into the checked out source:
|
||||||
not).
|
`cd google-cloud-go`
|
||||||
|
|
||||||
1. If you would like, you may want to set up aliases for `git-codereview`,
|
1. Fork the repo and add your fork as a secondary remote (this is necessary in
|
||||||
such that `git codereview change` becomes `git change`. See the
|
order to create PRs).
|
||||||
[godoc](https://pkg.go.dev/golang.org/x/review/git-codereview) for details.
|
|
||||||
|
|
||||||
* Should you run into issues with the `git-codereview` tool, please note
|
|
||||||
that all error messages will assume that you have set up these aliases.
|
|
||||||
|
|
||||||
1. Change to a directory of your choosing and clone the repo.
|
|
||||||
|
|
||||||
```
|
|
||||||
cd ~/code
|
|
||||||
git clone https://code.googlesource.com/gocloud
|
|
||||||
```
|
|
||||||
|
|
||||||
* If you have already checked out the source, make sure that the remote
|
|
||||||
`git` `origin` is https://code.googlesource.com/gocloud:
|
|
||||||
|
|
||||||
```
|
|
||||||
git remote -v
|
|
||||||
# ...
|
|
||||||
git remote set-url origin https://code.googlesource.com/gocloud
|
|
||||||
```
|
|
||||||
|
|
||||||
* The project uses [Go Modules](https://blog.golang.org/using-go-modules)
|
|
||||||
for dependency management See
|
|
||||||
[`gopls`](https://github.com/golang/go/wiki/gopls) for making your editor
|
|
||||||
work with modules.
|
|
||||||
|
|
||||||
1. Change to the project directory and add the github remote:
|
|
||||||
|
|
||||||
```
|
|
||||||
cd ~/code/gocloud
|
|
||||||
git remote add github https://github.com/googleapis/google-cloud-go
|
|
||||||
```
|
|
||||||
|
|
||||||
1. Make sure your `git` auth is configured correctly by visiting
|
|
||||||
https://code.googlesource.com, clicking "Generate Password" at the top-right,
|
|
||||||
and following the directions. Otherwise, `git codereview mail` in the next step
|
|
||||||
will fail.
|
|
||||||
|
|
||||||
# Which module to release?
|
# Which module to release?
|
||||||
|
|
||||||
|
@ -98,7 +61,7 @@ the failures have been resolved.
|
||||||
# How to release `cloud.google.com/go`
|
# How to release `cloud.google.com/go`
|
||||||
|
|
||||||
1. Check for failures in the
|
1. Check for failures in the
|
||||||
[continuous Kokoro build](go/google-cloud-go-continuous). If there are any
|
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any
|
||||||
failures in the most recent build, address them before proceeding with the
|
failures in the most recent build, address them before proceeding with the
|
||||||
release.
|
release.
|
||||||
1. Navigate to `~/code/gocloud/` and switch to master.
|
1. Navigate to `~/code/gocloud/` and switch to master.
|
||||||
|
@ -114,16 +77,15 @@ the failures have been resolved.
|
||||||
to be part of your release).
|
to be part of your release).
|
||||||
1. Edit `CHANGES.md` to include a summary of the changes.
|
1. Edit `CHANGES.md` to include a summary of the changes.
|
||||||
1. `cd internal/version && go generate && cd -`
|
1. `cd internal/version && go generate && cd -`
|
||||||
1. Mail the CL: `git add -A && git change <branch name> && git mail`
|
1. Commit the changes, push to your fork, and create a PR.
|
||||||
1. Wait for the CL to be submitted. Once it's submitted, and without submitting
|
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
||||||
any other CLs in the meantime:
|
merging any other PRs in the meantime:
|
||||||
a. Switch to master.
|
a. Switch to master.
|
||||||
b. `git pull`
|
b. `git pull`
|
||||||
c. Tag the repo with the next version: `git tag $NV`.
|
c. Tag the repo with the next version: `git tag $NV`.
|
||||||
d. Push the tag to both remotes:
|
d. Push the tag to origin:
|
||||||
`git push origin $NV`
|
`git push origin $NV`
|
||||||
`git push github $NV`
|
2. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
||||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
|
||||||
with the new release, copying the contents of `CHANGES.md`.
|
with the new release, copying the contents of `CHANGES.md`.
|
||||||
|
|
||||||
# How to release a submodule
|
# How to release a submodule
|
||||||
|
@ -136,7 +98,7 @@ To release a submodule:
|
||||||
(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
|
(these instructions assume we're releasing `cloud.google.com/go/datastore` - adjust accordingly)
|
||||||
|
|
||||||
1. Check for failures in the
|
1. Check for failures in the
|
||||||
[continuous Kokoro build](go/google-cloud-go-continuous). If there are any
|
[continuous Kokoro build](http://go/google-cloud-go-continuous). If there are any
|
||||||
failures in the most recent build, address them before proceeding with the
|
failures in the most recent build, address them before proceeding with the
|
||||||
release. (This applies even if the failures are in a different submodule from the one
|
release. (This applies even if the failures are in a different submodule from the one
|
||||||
being released.)
|
being released.)
|
||||||
|
@ -150,15 +112,14 @@ To release a submodule:
|
||||||
submodule directory since the last release.
|
submodule directory since the last release.
|
||||||
1. Edit `datastore/CHANGES.md` to include a summary of the changes.
|
1. Edit `datastore/CHANGES.md` to include a summary of the changes.
|
||||||
1. `cd internal/version && go generate && cd -`
|
1. `cd internal/version && go generate && cd -`
|
||||||
1. Mail the CL: `git add -A && git change <branch name> && git mail`
|
1. Commit the changes, push to your fork, and create a PR.
|
||||||
1. Wait for the CL to be submitted. Once it's submitted, and without submitting
|
1. Wait for the PR to be reviewed and merged. Once it's merged, and without
|
||||||
any other CLs in the meantime:
|
merging any other PRs in the meantime:
|
||||||
a. Switch to master.
|
a. Switch to master.
|
||||||
b. `git pull`
|
b. `git pull`
|
||||||
c. Tag the repo with the next version: `git tag $NV`.
|
c. Tag the repo with the next version: `git tag $NV`.
|
||||||
d. Push the tag to both remotes:
|
d. Push the tag to origin:
|
||||||
`git push origin $NV`
|
`git push origin $NV`
|
||||||
`git push github $NV`
|
|
||||||
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
1. Update [the releases page](https://github.com/googleapis/google-cloud-go/releases)
|
||||||
with the new release, copying the contents of `datastore/CHANGES.md`.
|
with the new release, copying the contents of `datastore/CHANGES.md`.
|
||||||
|
|
||||||
|
|
2
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
2
vendor/cloud.google.com/go/compute/metadata/metadata.go
generated
vendored
|
@ -140,7 +140,7 @@ func testOnGCE() bool {
|
||||||
}()
|
}()
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
addrs, err := net.LookupHost("metadata.google.internal")
|
addrs, err := net.DefaultResolver.LookupHost(ctx, "metadata.google.internal")
|
||||||
if err != nil || len(addrs) == 0 {
|
if err != nil || len(addrs) == 0 {
|
||||||
resc <- false
|
resc <- false
|
||||||
return
|
return
|
||||||
|
|
7
vendor/cloud.google.com/go/go.mod
generated
vendored
7
vendor/cloud.google.com/go/go.mod
generated
vendored
|
@ -20,9 +20,8 @@ require (
|
||||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
|
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
|
||||||
golang.org/x/sys v0.0.0-20200523222454-059865788121 // indirect
|
golang.org/x/sys v0.0.0-20200523222454-059865788121 // indirect
|
||||||
golang.org/x/text v0.3.2
|
golang.org/x/text v0.3.2
|
||||||
golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6
|
golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa
|
||||||
google.golang.org/api v0.26.0
|
google.golang.org/api v0.28.0
|
||||||
google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482
|
google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb
|
||||||
google.golang.org/grpc v1.29.1
|
google.golang.org/grpc v1.29.1
|
||||||
honnef.co/go/tools v0.0.1-2020.1.4
|
|
||||||
)
|
)
|
||||||
|
|
12
vendor/cloud.google.com/go/go.sum
generated
vendored
12
vendor/cloud.google.com/go/go.sum
generated
vendored
|
@ -334,8 +334,8 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY
|
||||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88 h1:4j84u0sokprDu3IdSYHJMmou+YSLflMz8p7yAx/QI4g=
|
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88 h1:4j84u0sokprDu3IdSYHJMmou+YSLflMz8p7yAx/QI4g=
|
||||||
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6 h1:5Y8c5HBW6hBYnGEE3AbJPV0R8RsQmg1/eaJrpvasns0=
|
golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa h1:mMXQKlWCw9mIWgVLLfiycDZjMHMMYqiuakI4E/l2xcA=
|
||||||
golang.org/x/tools v0.0.0-20200606014950-c42cb6316fb6/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
golang.org/x/tools v0.0.0-20200622203043-20e05c1c8ffa/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||||
|
@ -359,8 +359,8 @@ google.golang.org/api v0.22.0 h1:J1Pl9P2lnmYFSJvgs70DKELqHNh8CNWXPbud4njEE2s=
|
||||||
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||||
google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ=
|
google.golang.org/api v0.24.0 h1:cG03eaksBzhfSIk7JRGctfp3lanklcOM/mTGvow7BbQ=
|
||||||
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
google.golang.org/api v0.26.0 h1:VJZ8h6E8ip82FRpQl848c5vAadxlTXrUh8RzQzSRm08=
|
google.golang.org/api v0.28.0 h1:jMF5hhVfMkTZwHW1SDpKq5CkgWLXOb31Foaca9Zr3oM=
|
||||||
google.golang.org/api v0.26.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
@ -404,8 +404,8 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380 h1:xriR1EgvKfkKxIo
|
||||||
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||||
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
|
||||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||||
google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482 h1:i+Aiej6cta/Frzp13/swvwz5O00kYcSe0A/C5Wd7zX8=
|
google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb h1:PUcq6RTy8Gp9xukBme8m2+2Z8pQCmJ7TbPpQd6xNDvk=
|
||||||
google.golang.org/genproto v0.0.0-20200608115520-7c474a2e3482/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
|
google.golang.org/genproto v0.0.0-20200623002339-fbb79eadd5eb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||||
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8=
|
||||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||||
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU=
|
||||||
|
|
18
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
18
vendor/cloud.google.com/go/internal/.repo-metadata-full.json
generated
vendored
|
@ -23,6 +23,14 @@
|
||||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p2beta1",
|
"docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p2beta1",
|
||||||
"release_level": "beta"
|
"release_level": "beta"
|
||||||
},
|
},
|
||||||
|
"cloud.google.com/go/asset/apiv1p5beta1": {
|
||||||
|
"distribution_name": "cloud.google.com/go/asset/apiv1p5beta1",
|
||||||
|
"description": "Cloud Asset API",
|
||||||
|
"language": "Go",
|
||||||
|
"client_library_type": "generated",
|
||||||
|
"docs_url": "https://pkg.go.dev/cloud.google.com/go/asset/apiv1p5beta1",
|
||||||
|
"release_level": "beta"
|
||||||
|
},
|
||||||
"cloud.google.com/go/automl/apiv1": {
|
"cloud.google.com/go/automl/apiv1": {
|
||||||
"distribution_name": "cloud.google.com/go/automl/apiv1",
|
"distribution_name": "cloud.google.com/go/automl/apiv1",
|
||||||
"description": "Cloud AutoML API",
|
"description": "Cloud AutoML API",
|
||||||
|
@ -53,7 +61,7 @@
|
||||||
"language": "Go",
|
"language": "Go",
|
||||||
"client_library_type": "generated",
|
"client_library_type": "generated",
|
||||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1",
|
"docs_url": "https://pkg.go.dev/cloud.google.com/go/bigquery/connection/apiv1",
|
||||||
"release_level": "beta"
|
"release_level": "ga"
|
||||||
},
|
},
|
||||||
"cloud.google.com/go/bigquery/connection/apiv1beta1": {
|
"cloud.google.com/go/bigquery/connection/apiv1beta1": {
|
||||||
"distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1",
|
"distribution_name": "cloud.google.com/go/bigquery/connection/apiv1beta1",
|
||||||
|
@ -397,7 +405,7 @@
|
||||||
"language": "Go",
|
"language": "Go",
|
||||||
"client_library_type": "generated",
|
"client_library_type": "generated",
|
||||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1",
|
"docs_url": "https://pkg.go.dev/cloud.google.com/go/osconfig/agentendpoint/apiv1",
|
||||||
"release_level": "beta"
|
"release_level": "ga"
|
||||||
},
|
},
|
||||||
"cloud.google.com/go/osconfig/agentendpoint/apiv1beta": {
|
"cloud.google.com/go/osconfig/agentendpoint/apiv1beta": {
|
||||||
"distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta",
|
"distribution_name": "cloud.google.com/go/osconfig/agentendpoint/apiv1beta",
|
||||||
|
@ -553,7 +561,7 @@
|
||||||
},
|
},
|
||||||
"cloud.google.com/go/securitycenter/apiv1": {
|
"cloud.google.com/go/securitycenter/apiv1": {
|
||||||
"distribution_name": "cloud.google.com/go/securitycenter/apiv1",
|
"distribution_name": "cloud.google.com/go/securitycenter/apiv1",
|
||||||
"description": "Cloud Security Command Center API",
|
"description": "Security Command Center API",
|
||||||
"language": "Go",
|
"language": "Go",
|
||||||
"client_library_type": "generated",
|
"client_library_type": "generated",
|
||||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1",
|
"docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1",
|
||||||
|
@ -569,7 +577,7 @@
|
||||||
},
|
},
|
||||||
"cloud.google.com/go/securitycenter/apiv1p1beta1": {
|
"cloud.google.com/go/securitycenter/apiv1p1beta1": {
|
||||||
"distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1",
|
"distribution_name": "cloud.google.com/go/securitycenter/apiv1p1beta1",
|
||||||
"description": "Cloud Security Command Center API",
|
"description": "Security Command Center API",
|
||||||
"language": "Go",
|
"language": "Go",
|
||||||
"client_library_type": "generated",
|
"client_library_type": "generated",
|
||||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1p1beta1",
|
"docs_url": "https://pkg.go.dev/cloud.google.com/go/securitycenter/apiv1p1beta1",
|
||||||
|
@ -733,7 +741,7 @@
|
||||||
"language": "Go",
|
"language": "Go",
|
||||||
"client_library_type": "generated",
|
"client_library_type": "generated",
|
||||||
"docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1",
|
"docs_url": "https://pkg.go.dev/cloud.google.com/go/webrisk/apiv1",
|
||||||
"release_level": "beta"
|
"release_level": "ga"
|
||||||
},
|
},
|
||||||
"cloud.google.com/go/webrisk/apiv1beta1": {
|
"cloud.google.com/go/webrisk/apiv1beta1": {
|
||||||
"distribution_name": "cloud.google.com/go/webrisk/apiv1beta1",
|
"distribution_name": "cloud.google.com/go/webrisk/apiv1beta1",
|
||||||
|
|
2
vendor/cloud.google.com/go/internal/version/version.go
generated
vendored
2
vendor/cloud.google.com/go/internal/version/version.go
generated
vendored
|
@ -26,7 +26,7 @@ import (
|
||||||
|
|
||||||
// Repo is the current version of the client libraries in this
|
// Repo is the current version of the client libraries in this
|
||||||
// repo. It should be a date in YYYYMMDD format.
|
// repo. It should be a date in YYYYMMDD format.
|
||||||
const Repo = "20200602"
|
const Repo = "20200618"
|
||||||
|
|
||||||
// Go returns the Go runtime version. The returned string
|
// Go returns the Go runtime version. The returned string
|
||||||
// has no whitespace.
|
// has no whitespace.
|
||||||
|
|
1
vendor/cloud.google.com/go/tools.go
generated
vendored
1
vendor/cloud.google.com/go/tools.go
generated
vendored
|
@ -28,5 +28,4 @@ import (
|
||||||
_ "github.com/jstemmer/go-junit-report"
|
_ "github.com/jstemmer/go-junit-report"
|
||||||
_ "golang.org/x/lint/golint"
|
_ "golang.org/x/lint/golint"
|
||||||
_ "golang.org/x/tools/cmd/goimports"
|
_ "golang.org/x/tools/cmd/goimports"
|
||||||
_ "honnef.co/go/tools/cmd/staticcheck"
|
|
||||||
)
|
)
|
||||||
|
|
5
vendor/github.com/BurntSushi/toml/.gitignore
generated
vendored
5
vendor/github.com/BurntSushi/toml/.gitignore
generated
vendored
|
@ -1,5 +0,0 @@
|
||||||
TAGS
|
|
||||||
tags
|
|
||||||
.*.swp
|
|
||||||
tomlcheck/tomlcheck
|
|
||||||
toml.test
|
|
15
vendor/github.com/BurntSushi/toml/.travis.yml
generated
vendored
15
vendor/github.com/BurntSushi/toml/.travis.yml
generated
vendored
|
@ -1,15 +0,0 @@
|
||||||
language: go
|
|
||||||
go:
|
|
||||||
- 1.1
|
|
||||||
- 1.2
|
|
||||||
- 1.3
|
|
||||||
- 1.4
|
|
||||||
- 1.5
|
|
||||||
- 1.6
|
|
||||||
- tip
|
|
||||||
install:
|
|
||||||
- go install ./...
|
|
||||||
- go get github.com/BurntSushi/toml-test
|
|
||||||
script:
|
|
||||||
- export PATH="$PATH:$HOME/gopath/bin"
|
|
||||||
- make test
|
|
3
vendor/github.com/BurntSushi/toml/COMPATIBLE
generated
vendored
3
vendor/github.com/BurntSushi/toml/COMPATIBLE
generated
vendored
|
@ -1,3 +0,0 @@
|
||||||
Compatible with TOML version
|
|
||||||
[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
|
|
||||||
|
|
21
vendor/github.com/BurntSushi/toml/COPYING
generated
vendored
21
vendor/github.com/BurntSushi/toml/COPYING
generated
vendored
|
@ -1,21 +0,0 @@
|
||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2013 TOML authors
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
19
vendor/github.com/BurntSushi/toml/Makefile
generated
vendored
19
vendor/github.com/BurntSushi/toml/Makefile
generated
vendored
|
@ -1,19 +0,0 @@
|
||||||
install:
|
|
||||||
go install ./...
|
|
||||||
|
|
||||||
test: install
|
|
||||||
go test -v
|
|
||||||
toml-test toml-test-decoder
|
|
||||||
toml-test -encoder toml-test-encoder
|
|
||||||
|
|
||||||
fmt:
|
|
||||||
gofmt -w *.go */*.go
|
|
||||||
colcheck *.go */*.go
|
|
||||||
|
|
||||||
tags:
|
|
||||||
find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
|
|
||||||
|
|
||||||
push:
|
|
||||||
git push origin master
|
|
||||||
git push github master
|
|
||||||
|
|
218
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
218
vendor/github.com/BurntSushi/toml/README.md
generated
vendored
|
@ -1,218 +0,0 @@
|
||||||
## TOML parser and encoder for Go with reflection
|
|
||||||
|
|
||||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
|
||||||
reflection interface similar to Go's standard library `json` and `xml`
|
|
||||||
packages. This package also supports the `encoding.TextUnmarshaler` and
|
|
||||||
`encoding.TextMarshaler` interfaces so that you can define custom data
|
|
||||||
representations. (There is an example of this below.)
|
|
||||||
|
|
||||||
Spec: https://github.com/toml-lang/toml
|
|
||||||
|
|
||||||
Compatible with TOML version
|
|
||||||
[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
|
|
||||||
|
|
||||||
Documentation: https://godoc.org/github.com/BurntSushi/toml
|
|
||||||
|
|
||||||
Installation:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get github.com/BurntSushi/toml
|
|
||||||
```
|
|
||||||
|
|
||||||
Try the toml validator:
|
|
||||||
|
|
||||||
```bash
|
|
||||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
|
||||||
tomlv some-toml-file.toml
|
|
||||||
```
|
|
||||||
|
|
||||||
[![Build Status](https://travis-ci.org/BurntSushi/toml.svg?branch=master)](https://travis-ci.org/BurntSushi/toml) [![GoDoc](https://godoc.org/github.com/BurntSushi/toml?status.svg)](https://godoc.org/github.com/BurntSushi/toml)
|
|
||||||
|
|
||||||
### Testing
|
|
||||||
|
|
||||||
This package passes all tests in
|
|
||||||
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
|
|
||||||
and the encoder.
|
|
||||||
|
|
||||||
### Examples
|
|
||||||
|
|
||||||
This package works similarly to how the Go standard library handles `XML`
|
|
||||||
and `JSON`. Namely, data is loaded into Go values via reflection.
|
|
||||||
|
|
||||||
For the simplest example, consider some TOML file as just a list of keys
|
|
||||||
and values:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
Age = 25
|
|
||||||
Cats = [ "Cauchy", "Plato" ]
|
|
||||||
Pi = 3.14
|
|
||||||
Perfection = [ 6, 28, 496, 8128 ]
|
|
||||||
DOB = 1987-07-05T05:45:00Z
|
|
||||||
```
|
|
||||||
|
|
||||||
Which could be defined in Go as:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Config struct {
|
|
||||||
Age int
|
|
||||||
Cats []string
|
|
||||||
Pi float64
|
|
||||||
Perfection []int
|
|
||||||
DOB time.Time // requires `import time`
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
And then decoded with:
|
|
||||||
|
|
||||||
```go
|
|
||||||
var conf Config
|
|
||||||
if _, err := toml.Decode(tomlData, &conf); err != nil {
|
|
||||||
// handle error
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
|
||||||
key value directly:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
some_key_NAME = "wat"
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
type TOML struct {
|
|
||||||
ObscureKey string `toml:"some_key_NAME"`
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Using the `encoding.TextUnmarshaler` interface
|
|
||||||
|
|
||||||
Here's an example that automatically parses duration strings into
|
|
||||||
`time.Duration` values:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
[[song]]
|
|
||||||
name = "Thunder Road"
|
|
||||||
duration = "4m49s"
|
|
||||||
|
|
||||||
[[song]]
|
|
||||||
name = "Stairway to Heaven"
|
|
||||||
duration = "8m03s"
|
|
||||||
```
|
|
||||||
|
|
||||||
Which can be decoded with:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type song struct {
|
|
||||||
Name string
|
|
||||||
Duration duration
|
|
||||||
}
|
|
||||||
type songs struct {
|
|
||||||
Song []song
|
|
||||||
}
|
|
||||||
var favorites songs
|
|
||||||
if _, err := toml.Decode(blob, &favorites); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, s := range favorites.Song {
|
|
||||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
And you'll also need a `duration` type that satisfies the
|
|
||||||
`encoding.TextUnmarshaler` interface:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type duration struct {
|
|
||||||
time.Duration
|
|
||||||
}
|
|
||||||
|
|
||||||
func (d *duration) UnmarshalText(text []byte) error {
|
|
||||||
var err error
|
|
||||||
d.Duration, err = time.ParseDuration(string(text))
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
### More complex usage
|
|
||||||
|
|
||||||
Here's an example of how to load the example from the official spec page:
|
|
||||||
|
|
||||||
```toml
|
|
||||||
# This is a TOML document. Boom.
|
|
||||||
|
|
||||||
title = "TOML Example"
|
|
||||||
|
|
||||||
[owner]
|
|
||||||
name = "Tom Preston-Werner"
|
|
||||||
organization = "GitHub"
|
|
||||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
|
||||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
|
||||||
|
|
||||||
[database]
|
|
||||||
server = "192.168.1.1"
|
|
||||||
ports = [ 8001, 8001, 8002 ]
|
|
||||||
connection_max = 5000
|
|
||||||
enabled = true
|
|
||||||
|
|
||||||
[servers]
|
|
||||||
|
|
||||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
|
||||||
[servers.alpha]
|
|
||||||
ip = "10.0.0.1"
|
|
||||||
dc = "eqdc10"
|
|
||||||
|
|
||||||
[servers.beta]
|
|
||||||
ip = "10.0.0.2"
|
|
||||||
dc = "eqdc10"
|
|
||||||
|
|
||||||
[clients]
|
|
||||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
|
||||||
|
|
||||||
# Line breaks are OK when inside arrays
|
|
||||||
hosts = [
|
|
||||||
"alpha",
|
|
||||||
"omega"
|
|
||||||
]
|
|
||||||
```
|
|
||||||
|
|
||||||
And the corresponding Go types are:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type tomlConfig struct {
|
|
||||||
Title string
|
|
||||||
Owner ownerInfo
|
|
||||||
DB database `toml:"database"`
|
|
||||||
Servers map[string]server
|
|
||||||
Clients clients
|
|
||||||
}
|
|
||||||
|
|
||||||
type ownerInfo struct {
|
|
||||||
Name string
|
|
||||||
Org string `toml:"organization"`
|
|
||||||
Bio string
|
|
||||||
DOB time.Time
|
|
||||||
}
|
|
||||||
|
|
||||||
type database struct {
|
|
||||||
Server string
|
|
||||||
Ports []int
|
|
||||||
ConnMax int `toml:"connection_max"`
|
|
||||||
Enabled bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type server struct {
|
|
||||||
IP string
|
|
||||||
DC string
|
|
||||||
}
|
|
||||||
|
|
||||||
type clients struct {
|
|
||||||
Data [][]interface{}
|
|
||||||
Hosts []string
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that a case insensitive match will be tried if an exact match can't be
|
|
||||||
found.
|
|
||||||
|
|
||||||
A working example of the above can be found in `_examples/example.{go,toml}`.
|
|
509
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
509
vendor/github.com/BurntSushi/toml/decode.go
generated
vendored
|
@ -1,509 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"math"
|
|
||||||
"reflect"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func e(format string, args ...interface{}) error {
|
|
||||||
return fmt.Errorf("toml: "+format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
|
||||||
// TOML description of themselves.
|
|
||||||
type Unmarshaler interface {
|
|
||||||
UnmarshalTOML(interface{}) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
|
||||||
func Unmarshal(p []byte, v interface{}) error {
|
|
||||||
_, err := Decode(string(p), v)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
	undecoded interface{} // raw parsed value, not yet unified into a Go type
	context   Key         // key path at which the value was found; restored on delayed decode
}
|
|
||||||
|
|
||||||
// DEPRECATED!
//
// Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
	// A throwaway MetaData is used here, so the decoded-key bookkeeping of
	// the original Decode call is NOT updated (unlike the method variant).
	md := MetaData{decoded: make(map[string]bool)}
	return md.unify(primValue.undecoded, rvalue(v))
}
|
|
||||||
|
|
||||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
	// Temporarily restore the key context captured when the Primitive was
	// created, so decoded-key bookkeeping records fully qualified keys.
	md.context = primValue.context
	defer func() { md.context = nil }()
	return md.unify(primValue.undecoded, rvalue(v))
}
|
|
||||||
|
|
||||||
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
	// The target must be a non-nil pointer so the decoded values can be
	// written through it.
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
	}
	if rv.IsNil() {
		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
	}
	p, err := parse(data)
	if err != nil {
		return MetaData{}, err
	}
	// NOTE: positional struct literal — fields are mapping, types, keys,
	// decoded, context (context starts nil and is only used during unify).
	md := MetaData{
		p.mapping, p.types, p.ordered,
		make(map[string]bool, len(p.ordered)), nil,
	}
	return md, md.unify(p.mapping, indirect(rv))
}
|
|
||||||
|
|
||||||
// DecodeFile is just like Decode, except it will automatically read the
|
|
||||||
// contents of the file at `fpath` and decode it for you.
|
|
||||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
|
||||||
bs, err := ioutil.ReadFile(fpath)
|
|
||||||
if err != nil {
|
|
||||||
return MetaData{}, err
|
|
||||||
}
|
|
||||||
return Decode(string(bs), v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// DecodeReader is just like Decode, except it will consume all bytes
|
|
||||||
// from the reader and decode it for you.
|
|
||||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
|
||||||
bs, err := ioutil.ReadAll(r)
|
|
||||||
if err != nil {
|
|
||||||
return MetaData{}, err
|
|
||||||
}
|
|
||||||
return Decode(string(bs), v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
//
// The order of the special cases below is load-bearing: Primitive capture
// must win over everything, Unmarshaler over time.Time, and time.Time over
// TextUnmarshaler (time.Time implements the encoding interfaces).
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {

	// Special case. Look for a `Primitive` value.
	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
		// Save the undecoded data and the key context into the primitive
		// value. The context is copied because md.context is mutated as
		// decoding proceeds.
		context := make(Key, len(md.context))
		copy(context, md.context)
		rv.Set(reflect.ValueOf(Primitive{
			undecoded: data,
			context:   context,
		}))
		return nil
	}

	// Special case. Unmarshaler Interface support.
	if rv.CanAddr() {
		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
			return v.UnmarshalTOML(data)
		}
	}

	// Special case. Handle time.Time values specifically.
	// TODO: Remove this code when we decide to drop support for Go 1.1.
	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
	// interfaces.
	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
		return md.unifyDatetime(data, rv)
	}

	// Special case. Look for a value satisfying the TextUnmarshaler interface.
	if v, ok := rv.Interface().(TextUnmarshaler); ok {
		return md.unifyText(data, v)
	}
	// BUG(burntsushi)
	// The behavior here is incorrect whenever a Go type satisfies the
	// encoding.TextUnmarshaler interface but also corresponds to a TOML
	// hash or array. In particular, the unmarshaler should only be applied
	// to primitive TOML values. But at this point, it will be applied to
	// all kinds of values and produce an incorrect error whenever those values
	// are hashes or arrays (including arrays of tables).

	k := rv.Kind()

	// laziness: all integer kinds (Int..Uint64) share one range check
	// instead of being enumerated in the switch below.
	if k >= reflect.Int && k <= reflect.Uint64 {
		return md.unifyInt(data, rv)
	}
	switch k {
	case reflect.Ptr:
		// Allocate a fresh value, unify into it, then point rv at it.
		elem := reflect.New(rv.Type().Elem())
		err := md.unify(data, reflect.Indirect(elem))
		if err != nil {
			return err
		}
		rv.Set(elem)
		return nil
	case reflect.Struct:
		return md.unifyStruct(data, rv)
	case reflect.Map:
		return md.unifyMap(data, rv)
	case reflect.Array:
		return md.unifyArray(data, rv)
	case reflect.Slice:
		return md.unifySlice(data, rv)
	case reflect.String:
		return md.unifyString(data, rv)
	case reflect.Bool:
		return md.unifyBool(data, rv)
	case reflect.Interface:
		// we only support empty interfaces.
		if rv.NumMethod() > 0 {
			return e("unsupported type %s", rv.Type())
		}
		return md.unifyAnything(data, rv)
	case reflect.Float32:
		fallthrough
	case reflect.Float64:
		return md.unifyFloat64(data, rv)
	}
	return e("unsupported type %s", rv.Kind())
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
|
||||||
tmap, ok := mapping.(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
if mapping == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return e("type mismatch for %s: expected table but found %T",
|
|
||||||
rv.Type().String(), mapping)
|
|
||||||
}
|
|
||||||
|
|
||||||
for key, datum := range tmap {
|
|
||||||
var f *field
|
|
||||||
fields := cachedTypeFields(rv.Type())
|
|
||||||
for i := range fields {
|
|
||||||
ff := &fields[i]
|
|
||||||
if ff.name == key {
|
|
||||||
f = ff
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if f == nil && strings.EqualFold(ff.name, key) {
|
|
||||||
f = ff
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if f != nil {
|
|
||||||
subv := rv
|
|
||||||
for _, i := range f.index {
|
|
||||||
subv = indirect(subv.Field(i))
|
|
||||||
}
|
|
||||||
if isUnifiable(subv) {
|
|
||||||
md.decoded[md.context.add(key).String()] = true
|
|
||||||
md.context = append(md.context, key)
|
|
||||||
if err := md.unify(datum, subv); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
md.context = md.context[0 : len(md.context)-1]
|
|
||||||
} else if f.name != "" {
|
|
||||||
// Bad user! No soup for you!
|
|
||||||
return e("cannot write unexported field %s.%s",
|
|
||||||
rv.Type().String(), f.name)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
|
||||||
tmap, ok := mapping.(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
if tmap == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("map", mapping)
|
|
||||||
}
|
|
||||||
if rv.IsNil() {
|
|
||||||
rv.Set(reflect.MakeMap(rv.Type()))
|
|
||||||
}
|
|
||||||
for k, v := range tmap {
|
|
||||||
md.decoded[md.context.add(k).String()] = true
|
|
||||||
md.context = append(md.context, k)
|
|
||||||
|
|
||||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
|
||||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
|
||||||
if err := md.unify(v, rvval); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
md.context = md.context[0 : len(md.context)-1]
|
|
||||||
|
|
||||||
rvkey.SetString(k)
|
|
||||||
rv.SetMapIndex(rvkey, rvval)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
|
||||||
datav := reflect.ValueOf(data)
|
|
||||||
if datav.Kind() != reflect.Slice {
|
|
||||||
if !datav.IsValid() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("slice", data)
|
|
||||||
}
|
|
||||||
sliceLen := datav.Len()
|
|
||||||
if sliceLen != rv.Len() {
|
|
||||||
return e("expected array length %d; got TOML array of length %d",
|
|
||||||
rv.Len(), sliceLen)
|
|
||||||
}
|
|
||||||
return md.unifySliceArray(datav, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
|
||||||
datav := reflect.ValueOf(data)
|
|
||||||
if datav.Kind() != reflect.Slice {
|
|
||||||
if !datav.IsValid() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("slice", data)
|
|
||||||
}
|
|
||||||
n := datav.Len()
|
|
||||||
if rv.IsNil() || rv.Cap() < n {
|
|
||||||
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
|
|
||||||
}
|
|
||||||
rv.SetLen(n)
|
|
||||||
return md.unifySliceArray(datav, rv)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
|
||||||
sliceLen := data.Len()
|
|
||||||
for i := 0; i < sliceLen; i++ {
|
|
||||||
v := data.Index(i).Interface()
|
|
||||||
sliceval := indirect(rv.Index(i))
|
|
||||||
if err := md.unify(v, sliceval); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
|
||||||
if _, ok := data.(time.Time); ok {
|
|
||||||
rv.Set(reflect.ValueOf(data))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("time.Time", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
|
||||||
if s, ok := data.(string); ok {
|
|
||||||
rv.SetString(s)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("string", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
|
||||||
if num, ok := data.(float64); ok {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Float32:
|
|
||||||
fallthrough
|
|
||||||
case reflect.Float64:
|
|
||||||
rv.SetFloat(num)
|
|
||||||
default:
|
|
||||||
panic("bug")
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("float", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unifyInt stores a TOML integer (always parsed as int64) into any Go
// integer kind, with bounds checking for the narrower kinds. The caller
// (unify) guarantees rv's kind is in the Int..Uint64 range, so the final
// else branch is unreachable by construction.
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
	if num, ok := data.(int64); ok {
		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
			switch rv.Kind() {
			case reflect.Int, reflect.Int64:
				// No bounds checking necessary.
			case reflect.Int8:
				if num < math.MinInt8 || num > math.MaxInt8 {
					return e("value %d is out of range for int8", num)
				}
			case reflect.Int16:
				if num < math.MinInt16 || num > math.MaxInt16 {
					return e("value %d is out of range for int16", num)
				}
			case reflect.Int32:
				if num < math.MinInt32 || num > math.MaxInt32 {
					return e("value %d is out of range for int32", num)
				}
			}
			rv.SetInt(num)
		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
			// Negative values are caught by the explicit num < 0 checks
			// below; the conversion alone would silently wrap.
			unum := uint64(num)
			switch rv.Kind() {
			case reflect.Uint, reflect.Uint64:
				// No bounds checking necessary.
			case reflect.Uint8:
				if num < 0 || unum > math.MaxUint8 {
					return e("value %d is out of range for uint8", num)
				}
			case reflect.Uint16:
				if num < 0 || unum > math.MaxUint16 {
					return e("value %d is out of range for uint16", num)
				}
			case reflect.Uint32:
				if num < 0 || unum > math.MaxUint32 {
					return e("value %d is out of range for uint32", num)
				}
			}
			rv.SetUint(unum)
		} else {
			panic("unreachable")
		}
		return nil
	}
	return badtype("integer", data)
}
|
|
||||||
|
|
||||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
|
||||||
if b, ok := data.(bool); ok {
|
|
||||||
rv.SetBool(b)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return badtype("boolean", data)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unifyAnything assigns the parsed TOML value as-is; used for empty
// interface targets where no further type-directed decoding is possible.
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
	rv.Set(reflect.ValueOf(data))
	return nil
}
|
|
||||||
|
|
||||||
// unifyText converts a primitive TOML value to its textual form and hands
// it to v's UnmarshalText method. TextMarshaler is checked first so values
// that define their own text form (e.g. datetimes) use it; other primitives
// are formatted with fmt. Tables and arrays are rejected via badtype.
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
	var s string
	switch sdata := data.(type) {
	case TextMarshaler:
		text, err := sdata.MarshalText()
		if err != nil {
			return err
		}
		s = string(text)
	case fmt.Stringer:
		s = sdata.String()
	case string:
		s = sdata
	case bool:
		s = fmt.Sprintf("%v", sdata)
	case int64:
		s = fmt.Sprintf("%d", sdata)
	case float64:
		s = fmt.Sprintf("%f", sdata)
	default:
		return badtype("primitive (string-like)", data)
	}
	if err := v.UnmarshalText([]byte(s)); err != nil {
		return err
	}
	return nil
}
|
|
||||||
|
|
||||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved
// (see indirect for the allocation and TextUnmarshaler rules).
func rvalue(v interface{}) reflect.Value {
	return indirect(reflect.ValueOf(v))
}
|
|
||||||
|
|
||||||
// indirect returns the value pointed to by a pointer.
// Pointers are followed until the value is not a pointer.
// New values are allocated for each nil pointer.
//
// An exception to this rule is if the value satisfies an interface of
// interest to us (like encoding.TextUnmarshaler).
func indirect(v reflect.Value) reflect.Value {
	if v.Kind() != reflect.Ptr {
		// Non-pointer: if the *address* of this value implements
		// TextUnmarshaler, return the pointer instead so the interface
		// method can be invoked on it later.
		if v.CanSet() {
			pv := v.Addr()
			if _, ok := pv.Interface().(TextUnmarshaler); ok {
				return pv
			}
		}
		return v
	}
	// Allocate through nil pointers so decoding always has somewhere
	// to write.
	if v.IsNil() {
		v.Set(reflect.New(v.Type().Elem()))
	}
	return indirect(reflect.Indirect(v))
}
|
|
||||||
|
|
||||||
func isUnifiable(rv reflect.Value) bool {
|
|
||||||
if rv.CanSet() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// badtype builds the standard type-mismatch error for a parsed TOML value
// that cannot be stored in the expected kind of Go value.
func badtype(expected string, data interface{}) error {
	return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
|
|
121
vendor/github.com/BurntSushi/toml/decode_meta.go
generated
vendored
121
vendor/github.com/BurntSushi/toml/decode_meta.go
generated
vendored
|
@ -1,121 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
import "strings"
|
|
||||||
|
|
||||||
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
	mapping map[string]interface{} // parsed key/value tree from the parser
	types   map[string]tomlType    // TOML type per dotted, fully-qualified key
	keys    []Key                  // every key, in document order
	decoded map[string]bool        // dotted keys that have been decoded so far
	context Key                    // Used only during decoding.
}
|
|
||||||
|
|
||||||
// IsDefined returns true if the key given exists in the TOML data. The key
|
|
||||||
// should be specified hierarchially. e.g.,
|
|
||||||
//
|
|
||||||
// // access the TOML key 'a.b.c'
|
|
||||||
// IsDefined("a", "b", "c")
|
|
||||||
//
|
|
||||||
// IsDefined will return false if an empty key given. Keys are case sensitive.
|
|
||||||
func (md *MetaData) IsDefined(key ...string) bool {
|
|
||||||
if len(key) == 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
var hash map[string]interface{}
|
|
||||||
var ok bool
|
|
||||||
var hashOrVal interface{} = md.mapping
|
|
||||||
for _, k := range key {
|
|
||||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if hashOrVal, ok = hash[k]; !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type returns a string representation of the type of the key specified.
|
|
||||||
//
|
|
||||||
// Type will return the empty string if given an empty key or a key that
|
|
||||||
// does not exist. Keys are case sensitive.
|
|
||||||
func (md *MetaData) Type(key ...string) string {
|
|
||||||
fullkey := strings.Join(key, ".")
|
|
||||||
if typ, ok := md.types[fullkey]; ok {
|
|
||||||
return typ.typeString()
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type.
type Key []string

// String joins the key components with dots, without any quoting.
func (k Key) String() string {
	return strings.Join(k, ".")
}
|
|
||||||
|
|
||||||
func (k Key) maybeQuotedAll() string {
|
|
||||||
var ss []string
|
|
||||||
for i := range k {
|
|
||||||
ss = append(ss, k.maybeQuoted(i))
|
|
||||||
}
|
|
||||||
return strings.Join(ss, ".")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (k Key) maybeQuoted(i int) string {
|
|
||||||
quote := false
|
|
||||||
for _, c := range k[i] {
|
|
||||||
if !isBareKeyChar(c) {
|
|
||||||
quote = true
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if quote {
|
|
||||||
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
|
|
||||||
}
|
|
||||||
return k[i]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (k Key) add(piece string) Key {
|
|
||||||
newKey := make(Key, len(k)+1)
|
|
||||||
copy(newKey, k)
|
|
||||||
newKey[len(k)] = piece
|
|
||||||
return newKey
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
func (md *MetaData) Keys() []Key {
	return md.keys
}
|
|
||||||
|
|
||||||
// Undecoded returns all keys that have not been decoded in the order in which
|
|
||||||
// they appear in the original TOML document.
|
|
||||||
//
|
|
||||||
// This includes keys that haven't been decoded because of a Primitive value.
|
|
||||||
// Once the Primitive value is decoded, the keys will be considered decoded.
|
|
||||||
//
|
|
||||||
// Also note that decoding into an empty interface will result in no decoding,
|
|
||||||
// and so no keys will be considered decoded.
|
|
||||||
//
|
|
||||||
// In this sense, the Undecoded keys correspond to keys in the TOML document
|
|
||||||
// that do not have a concrete type in your representation.
|
|
||||||
func (md *MetaData) Undecoded() []Key {
|
|
||||||
undecoded := make([]Key, 0, len(md.keys))
|
|
||||||
for _, key := range md.keys {
|
|
||||||
if !md.decoded[key.String()] {
|
|
||||||
undecoded = append(undecoded, key)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return undecoded
|
|
||||||
}
|
|
27
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
27
vendor/github.com/BurntSushi/toml/doc.go
generated
vendored
|
@ -1,27 +0,0 @@
|
||||||
/*
Package toml provides facilities for decoding and encoding TOML configuration
files via reflection. There is also support for delaying decoding with
the Primitive type, and querying the set of keys in a TOML document with the
MetaData type.

The specification implemented: https://github.com/toml-lang/toml

The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
whether a file is a valid TOML document. It can also be used to print the
type of each key in a TOML document.

Testing

There are two important types of tests used for this package. The first is
contained inside '*_test.go' files and uses the standard Go unit testing
framework. These tests are primarily devoted to holistically testing the
decoder and encoder.

The second type of testing is used to verify the implementation's adherence
to the TOML specification. These tests have been factored into their own
project: https://github.com/BurntSushi/toml-test

The reason the tests are in a separate project is so that they can be used by
any implementation of TOML. Namely, it is language agnostic.
*/
package toml
|
|
568
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
568
vendor/github.com/BurntSushi/toml/encode.go
generated
vendored
|
@ -1,568 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// tomlEncodeError wraps an error so that safeEncode's recover handler can
// distinguish panics raised deliberately by the encoder (via encPanic) from
// genuine programmer bugs, which are re-panicked.
type tomlEncodeError struct{ error }

// Sentinel errors returned (via panic/recover) for Go values that have no
// valid TOML representation.
var (
	errArrayMixedElementTypes = errors.New(
		"toml: cannot encode array with mixed element types")
	errArrayNilElement = errors.New(
		"toml: cannot encode array with nil element")
	errNonString = errors.New(
		"toml: cannot encode a map with non-string key type")
	errAnonNonStruct = errors.New(
		"toml: cannot encode an anonymous field that is not a struct")
	errArrayNoTable = errors.New(
		"toml: TOML array element cannot contain a table")
	errNoKey = errors.New(
		"toml: top-level values must be Go maps or structs")
	errAnything = errors.New("") // used in testing
)
|
|
||||||
|
|
||||||
// quotedReplacer escapes the characters that must be backslash-escaped
// inside a basic (double-quoted) TOML string.
var quotedReplacer = strings.NewReplacer(
	"\t", "\\t",
	"\n", "\\n",
	"\r", "\\r",
	"\"", "\\\"",
	"\\", "\\\\",
)
|
|
||||||
|
|
||||||
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
	// A single indentation level. By default it is two spaces.
	Indent string

	// hasWritten is whether we have written any output to w yet.
	hasWritten bool
	// w buffers all output; it is flushed once per Encode call.
	w *bufio.Writer
}
|
|
||||||
|
|
||||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
|
||||||
// given. By default, a single indentation level is 2 spaces.
|
|
||||||
func NewEncoder(w io.Writer) *Encoder {
|
|
||||||
return &Encoder{
|
|
||||||
w: bufio.NewWriter(w),
|
|
||||||
Indent: " ",
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Encode writes a TOML representation of the Go value to the underlying
// io.Writer. If the value given cannot be encoded to a valid TOML document,
// then an error is returned.
//
// The mapping between Go values and TOML values should be precisely the same
// as for the Decode* functions. Similarly, the TextMarshaler interface is
// supported by encoding the resulting bytes as strings. (If you want to write
// arbitrary binary data then you will need to use something like base64 since
// TOML does not have any binary types.)
//
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
// sub-hashes are encoded first.
//
// If a Go map is encoded, then its keys are sorted alphabetically for
// deterministic output. More control over this behavior may be provided if
// there is demand for it.
//
// Encoding Go values without a corresponding TOML representation---like map
// types with non-string keys---will cause an error to be returned. Similarly
// for mixed arrays/slices, arrays/slices with nil elements, embedded
// non-struct types and nested slices containing maps or structs.
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
// and so is []map[string][]string.)
func (enc *Encoder) Encode(v interface{}) error {
	rv := eindirect(reflect.ValueOf(v))
	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
		return err
	}
	// Flush the buffered writer so all output reaches the underlying
	// io.Writer before returning.
	return enc.w.Flush()
}
|
|
||||||
|
|
||||||
// safeEncode runs enc.encode and converts panics carrying a tomlEncodeError
// back into an ordinary returned error. Any other panic is assumed to be a
// programmer bug and is re-raised.
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if terr, ok := r.(tomlEncodeError); ok {
				// Deliberate encoder panic: surface as the wrapped error.
				err = terr.error
				return
			}
			panic(r)
		}
	}()
	enc.encode(key, rv)
	return nil
}
|
|
||||||
|
|
||||||
// encode dispatches on the value's dynamic type and kind, emitting either a
// "key = element" line, a [table], or an [[array-of-tables]] as appropriate.
// Errors are reported by panicking with tomlEncodeError (see safeEncode).
func (enc *Encoder) encode(key Key, rv reflect.Value) {
	// Special case. Time needs to be in ISO8601 format.
	// Special case. If we can marshal the type to text, then we used that.
	// Basically, this prevents the encoder for handling these types as
	// generic structs (or whatever the underlying type of a TextMarshaler is).
	switch rv.Interface().(type) {
	case time.Time, TextMarshaler:
		enc.keyEqElement(key, rv)
		return
	}

	k := rv.Kind()
	switch k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64,
		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
		enc.keyEqElement(key, rv)
	case reflect.Array, reflect.Slice:
		// A slice whose elements are tables becomes [[key]]; anything
		// else becomes an inline array on a "key = [...]" line.
		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
			enc.eArrayOfTables(key, rv)
		} else {
			enc.keyEqElement(key, rv)
		}
	case reflect.Interface:
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Map:
		if rv.IsNil() {
			return
		}
		enc.eTable(key, rv)
	case reflect.Ptr:
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Struct:
		enc.eTable(key, rv)
	default:
		panic(e("unsupported type for key '%s': %s", key, k))
	}
}
|
|
||||||
|
|
||||||
// eElement encodes any value that can be an array element (primitives and
// arrays).
func (enc *Encoder) eElement(rv reflect.Value) {
	switch v := rv.Interface().(type) {
	case time.Time:
		// Special case time.Time as a primitive. Has to come before
		// TextMarshaler below because time.Time implements
		// encoding.TextMarshaler, but we need to always use UTC.
		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
		return
	case TextMarshaler:
		// Special case. Use text marshaler if it's available for this value.
		if s, err := v.MarshalText(); err != nil {
			encPanic(err)
		} else {
			enc.writeQuoted(string(s))
		}
		return
	}
	switch rv.Kind() {
	case reflect.Bool:
		enc.wf(strconv.FormatBool(rv.Bool()))
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64:
		enc.wf(strconv.FormatInt(rv.Int(), 10))
	case reflect.Uint, reflect.Uint8, reflect.Uint16,
		reflect.Uint32, reflect.Uint64:
		enc.wf(strconv.FormatUint(rv.Uint(), 10))
	case reflect.Float32:
		// floatAddDecimal guarantees the TOML requirement that floats
		// carry a decimal point.
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
	case reflect.Float64:
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
	case reflect.Array, reflect.Slice:
		enc.eArrayOrSliceElement(rv)
	case reflect.Interface:
		enc.eElement(rv.Elem())
	case reflect.String:
		enc.writeQuoted(rv.String())
	default:
		panic(e("unexpected primitive type: %s", rv.Kind()))
	}
}
|
|
||||||
|
|
||||||
// floatAddDecimal ensures a formatted float satisfies the TOML requirement
// that every float carry a decimal point with at least one digit on each
// side: a representation without a '.' gets ".0" appended.
func floatAddDecimal(fstr string) string {
	if strings.ContainsRune(fstr, '.') {
		return fstr
	}
	return fstr + ".0"
}
|
|
||||||
|
|
||||||
// writeQuoted writes s as a basic (double-quoted) TOML string, escaping
// special characters via quotedReplacer.
func (enc *Encoder) writeQuoted(s string) {
	enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
|
||||||
length := rv.Len()
|
|
||||||
enc.wf("[")
|
|
||||||
for i := 0; i < length; i++ {
|
|
||||||
elem := rv.Index(i)
|
|
||||||
enc.eElement(elem)
|
|
||||||
if i != length-1 {
|
|
||||||
enc.wf(", ")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
enc.wf("]")
|
|
||||||
}
|
|
||||||
|
|
||||||
// eArrayOfTables encodes rv (a slice/array of tables) using the TOML
// "[[key]]" array-of-tables syntax, emitting one header per non-nil element.
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	for i := 0; i < rv.Len(); i++ {
		trv := rv.Index(i)
		// Nil elements are silently dropped rather than emitted as
		// empty tables.
		if isNil(trv) {
			continue
		}
		panicIfInvalidKey(key)
		enc.newline()
		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
		enc.eMapOrStruct(key, trv)
	}
}
|
|
||||||
|
|
||||||
// eTable encodes rv as a TOML table, writing a "[key]" header (unless key
// is empty, i.e. the top-level table) followed by the table's contents.
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
	panicIfInvalidKey(key)
	if len(key) == 1 {
		// Output an extra newline between top-level tables.
		// (The newline isn't written if nothing else has been written though.)
		enc.newline()
	}
	if len(key) > 0 {
		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
	}
	enc.eMapOrStruct(key, rv)
}
|
|
||||||
|
|
||||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
|
||||||
switch rv := eindirect(rv); rv.Kind() {
|
|
||||||
case reflect.Map:
|
|
||||||
enc.eMap(key, rv)
|
|
||||||
case reflect.Struct:
|
|
||||||
enc.eStruct(key, rv)
|
|
||||||
default:
|
|
||||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// eMap encodes a map with string keys as the contents of a TOML table.
// Only string-keyed maps are representable in TOML; anything else panics.
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
	rt := rv.Type()
	if rt.Key().Kind() != reflect.String {
		encPanic(errNonString)
	}

	// Sort keys so that we have deterministic output. And write keys directly
	// underneath this key first, before writing sub-structs or sub-maps.
	var mapKeysDirect, mapKeysSub []string
	for _, mapKey := range rv.MapKeys() {
		k := mapKey.String()
		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
			mapKeysSub = append(mapKeysSub, k)
		} else {
			mapKeysDirect = append(mapKeysDirect, k)
		}
	}

	var writeMapKeys = func(mapKeys []string) {
		sort.Strings(mapKeys)
		for _, mapKey := range mapKeys {
			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
			if isNil(mrv) {
				// Don't write anything for nil fields.
				continue
			}
			enc.encode(key.add(mapKey), mrv)
		}
	}
	// Direct (non-table) values must precede sub-tables so they stay
	// inside this table rather than leaking into the last sub-table.
	writeMapKeys(mapKeysDirect)
	writeMapKeys(mapKeysSub)
}
|
|
||||||
|
|
||||||
// eStruct encodes the exported fields of a struct as the contents of a TOML
// table, honoring `toml` tag options (rename, skip, omitempty, omitzero)
// and flattening untagged anonymous (embedded) struct fields.
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
	// Write keys for fields directly under this key first, because if we write
	// a field that creates a new table, then all keys under it will be in that
	// table (not the one we're writing here).
	rt := rv.Type()
	var fieldsDirect, fieldsSub [][]int
	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
		for i := 0; i < rt.NumField(); i++ {
			f := rt.Field(i)
			// skip unexported fields
			if f.PkgPath != "" && !f.Anonymous {
				continue
			}
			frv := rv.Field(i)
			if f.Anonymous {
				t := f.Type
				switch t.Kind() {
				case reflect.Struct:
					// Treat anonymous struct fields with
					// tag names as though they are not
					// anonymous, like encoding/json does.
					if getOptions(f.Tag).name == "" {
						addFields(t, frv, f.Index)
						continue
					}
				case reflect.Ptr:
					if t.Elem().Kind() == reflect.Struct &&
						getOptions(f.Tag).name == "" {
						// Flatten only non-nil embedded struct pointers.
						if !frv.IsNil() {
							addFields(t.Elem(), frv.Elem(), f.Index)
						}
						continue
					}
					// Fall through to the normal field encoding logic below
					// for non-struct anonymous fields.
				}
			}

			// Table-typed fields are deferred; everything else is direct.
			if typeIsHash(tomlTypeOfGo(frv)) {
				fieldsSub = append(fieldsSub, append(start, f.Index...))
			} else {
				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
			}
		}
	}
	addFields(rt, rv, nil)

	var writeFields = func(fields [][]int) {
		for _, fieldIndex := range fields {
			sft := rt.FieldByIndex(fieldIndex)
			sf := rv.FieldByIndex(fieldIndex)
			if isNil(sf) {
				// Don't write anything for nil fields.
				continue
			}

			opts := getOptions(sft.Tag)
			if opts.skip {
				continue
			}
			keyName := sft.Name
			if opts.name != "" {
				keyName = opts.name
			}
			if opts.omitempty && isEmpty(sf) {
				continue
			}
			if opts.omitzero && isZero(sf) {
				continue
			}

			enc.encode(key.add(keyName), sf)
		}
	}
	// Direct values first, sub-tables second (see comment at the top).
	writeFields(fieldsDirect)
	writeFields(fieldsSub)
}
|
|
||||||
|
|
||||||
// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
// which means no concrete TOML type could be found (e.g. the value is a nil
// pointer/interface); callers use a nil result to reject illegal values such
// as nil array elements.
func tomlTypeOfGo(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() {
		return nil
	}
	switch rv.Kind() {
	case reflect.Bool:
		return tomlBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64:
		return tomlInteger
	case reflect.Float32, reflect.Float64:
		return tomlFloat
	case reflect.Array, reflect.Slice:
		// A slice of tables is an array-of-tables, not a plain array.
		if typeEqual(tomlHash, tomlArrayType(rv)) {
			return tomlArrayHash
		}
		return tomlArray
	case reflect.Ptr, reflect.Interface:
		return tomlTypeOfGo(rv.Elem())
	case reflect.String:
		return tomlString
	case reflect.Map:
		return tomlHash
	case reflect.Struct:
		switch rv.Interface().(type) {
		case time.Time:
			return tomlDatetime
		case TextMarshaler:
			// Text-marshalable structs serialize as strings.
			return tomlString
		default:
			return tomlHash
		}
	default:
		panic("unexpected reflect.Kind: " + rv.Kind().String())
	}
}
|
|
||||||
|
|
||||||
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
		return nil
	}
	firstType := tomlTypeOfGo(rv.Index(0))
	if firstType == nil {
		encPanic(errArrayNilElement)
	}

	// Every element must have the same TOML type as the first.
	rvlen := rv.Len()
	for i := 1; i < rvlen; i++ {
		elem := rv.Index(i)
		switch elemType := tomlTypeOfGo(elem); {
		case elemType == nil:
			encPanic(errArrayNilElement)
		case !typeEqual(firstType, elemType):
			encPanic(errArrayMixedElementTypes)
		}
	}
	// If we have a nested array, then we must make sure that the nested
	// array contains ONLY primitives.
	// This checks arbitrarily nested arrays.
	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
		nest := tomlArrayType(eindirect(rv.Index(0)))
		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
			encPanic(errArrayNoTable)
		}
	}
	return firstType
}
|
|
||||||
|
|
||||||
// tagOptions holds the parsed contents of a field's `toml` struct tag.
type tagOptions struct {
	skip      bool // "-": field is never encoded
	name      string // key name override; "" means use the field name
	omitempty bool // drop empty collections/strings/false booleans
	omitzero  bool // drop numeric zero values
}
|
|
||||||
|
|
||||||
func getOptions(tag reflect.StructTag) tagOptions {
|
|
||||||
t := tag.Get("toml")
|
|
||||||
if t == "-" {
|
|
||||||
return tagOptions{skip: true}
|
|
||||||
}
|
|
||||||
var opts tagOptions
|
|
||||||
parts := strings.Split(t, ",")
|
|
||||||
opts.name = parts[0]
|
|
||||||
for _, s := range parts[1:] {
|
|
||||||
switch s {
|
|
||||||
case "omitempty":
|
|
||||||
opts.omitempty = true
|
|
||||||
case "omitzero":
|
|
||||||
opts.omitzero = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return opts
|
|
||||||
}
|
|
||||||
|
|
||||||
func isZero(rv reflect.Value) bool {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
return rv.Int() == 0
|
|
||||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
return rv.Uint() == 0
|
|
||||||
case reflect.Float32, reflect.Float64:
|
|
||||||
return rv.Float() == 0.0
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isEmpty(rv reflect.Value) bool {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
|
||||||
return rv.Len() == 0
|
|
||||||
case reflect.Bool:
|
|
||||||
return !rv.Bool()
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// newline writes a newline separator, but only once some output has
// already been produced (avoids a leading blank line in the document).
func (enc *Encoder) newline() {
	if enc.hasWritten {
		enc.wf("\n")
	}
}
|
|
||||||
|
|
||||||
// keyEqElement writes a single "key = value" line, indented for the key's
// depth, followed by a newline. Panics (via encPanic) on an empty key.
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	panicIfInvalidKey(key)
	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
	enc.eElement(val)
	enc.newline()
}
|
|
||||||
|
|
||||||
// wf writes a formatted string to the underlying writer, panicking (via
// encPanic, recovered by the caller of Encode) on write errors, and records
// that output has been produced so newline() knows to emit separators.
func (enc *Encoder) wf(format string, v ...interface{}) {
	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
		encPanic(err)
	}
	enc.hasWritten = true
}
|
|
||||||
|
|
||||||
// indentStr returns the indentation prefix for a key: one copy of
// enc.Indent per level of nesting (the top level is not indented).
func (enc *Encoder) indentStr(key Key) string {
	return strings.Repeat(enc.Indent, len(key)-1)
}
|
|
||||||
|
|
||||||
// encPanic aborts encoding by panicking with a tomlEncodeError, which the
// top-level encoder recovers and converts back into an ordinary error.
func encPanic(err error) {
	panic(tomlEncodeError{err})
}
|
|
||||||
|
|
||||||
func eindirect(v reflect.Value) reflect.Value {
|
|
||||||
switch v.Kind() {
|
|
||||||
case reflect.Ptr, reflect.Interface:
|
|
||||||
return eindirect(v.Elem())
|
|
||||||
default:
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func isNil(rv reflect.Value) bool {
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
|
||||||
return rv.IsNil()
|
|
||||||
default:
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// panicIfInvalidKey aborts encoding (via encPanic) if any component of the
// key is empty; TOML forbids empty key/table names.
func panicIfInvalidKey(key Key) {
	for _, k := range key {
		if len(k) == 0 {
			encPanic(e("Key '%s' is not a valid table name. Key names "+
				"cannot be empty.", key.maybeQuotedAll()))
		}
	}
}
|
|
||||||
|
|
||||||
// isValidKeyName reports whether s may be used as a TOML key name; the only
// restriction enforced here is that it is non-empty.
func isValidKeyName(s string) bool {
	return s != ""
}
|
|
19
vendor/github.com/BurntSushi/toml/encoding_types.go
generated
vendored
19
vendor/github.com/BurntSushi/toml/encoding_types.go
generated
vendored
|
@ -1,19 +0,0 @@
|
||||||
// +build go1.2

package toml

// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces.

import (
	"encoding"
)

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
|
|
18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
generated
vendored
18
vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
generated
vendored
|
@ -1,18 +0,0 @@
|
||||||
// +build !go1.2

package toml

// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1.

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
	MarshalText() (text []byte, err error)
}

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
	UnmarshalText(text []byte) error
}
|
|
953
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
953
vendor/github.com/BurntSushi/toml/lex.go
generated
vendored
|
@ -1,953 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// itemType identifies the kind of token produced by the lexer.
type itemType int

const (
	itemError itemType = iota
	itemNIL   // used in the parser to indicate no type
	itemEOF
	itemText
	itemString
	itemRawString
	itemMultilineString
	itemRawMultilineString
	itemBool
	itemInteger
	itemFloat
	itemDatetime
	itemArray // the start of an array
	itemArrayEnd
	itemTableStart
	itemTableEnd
	itemArrayTableStart
	itemArrayTableEnd
	itemKeyStart
	itemCommentStart
	itemInlineTableStart
	itemInlineTableEnd
)
|
|
||||||
|
|
||||||
// Significant characters of the TOML grammar. eof is the sentinel rune
// returned by next() at the end of input.
const (
	eof              = 0
	comma            = ','
	tableStart       = '['
	tableEnd         = ']'
	arrayTableStart  = '['
	arrayTableEnd    = ']'
	tableSep         = '.'
	keySep           = '='
	arrayStart       = '['
	arrayEnd         = ']'
	commentStart     = '#'
	stringStart      = '"'
	stringEnd        = '"'
	rawStringStart   = '\''
	rawStringEnd     = '\''
	inlineTableStart = '{'
	inlineTableEnd   = '}'
)
|
|
||||||
|
|
||||||
// stateFn is one step of the lexer's state machine: it consumes some input
// and returns the next state (nil terminates lexing).
type stateFn func(lx *lexer) stateFn

// lexer holds the state of the TOML tokenizer.
type lexer struct {
	input string // full text being lexed
	start int    // start of the current (unemitted) token
	pos   int    // current read position
	line  int    // 1-based line number of pos
	state stateFn
	items chan item // emitted tokens, consumed by nextItem

	// Allow for backing up up to three runes.
	// This is necessary because TOML contains 3-rune tokens (""" and ''').
	prevWidths [3]int
	nprev      int // how many of prevWidths are in use
	// If we emit an eof, we can still back up, but it is not OK to call
	// next again.
	atEOF bool

	// A stack of state functions used to maintain context.
	// The idea is to reuse parts of the state machine in various places.
	// For example, values can appear at the top level or within arbitrarily
	// nested arrays. The last state on the stack is used after a value has
	// been lexed. Similarly for comments.
	stack []stateFn
}

// item is a single token produced by the lexer.
type item struct {
	typ  itemType
	val  string
	line int
}
|
|
||||||
|
|
||||||
// nextItem returns the next token, driving the state machine forward until
// one is available on the items channel.
func (lx *lexer) nextItem() item {
	for {
		select {
		case item := <-lx.items:
			return item
		default:
			// No buffered token: run one more state transition.
			lx.state = lx.state(lx)
		}
	}
}
|
|
||||||
|
|
||||||
// lex creates a lexer over input, initialized at the top-level state.
func lex(input string) *lexer {
	lx := &lexer{
		input: input,
		state: lexTop,
		line:  1,
		items: make(chan item, 10),
		stack: make([]stateFn, 0, 10),
	}
	return lx
}
|
|
||||||
|
|
||||||
// push saves a state function to return to after a nested construct
// (value, comment, …) finishes lexing.
func (lx *lexer) push(state stateFn) {
	lx.stack = append(lx.stack, state)
}
|
|
||||||
|
|
||||||
func (lx *lexer) pop() stateFn {
|
|
||||||
if len(lx.stack) == 0 {
|
|
||||||
return lx.errorf("BUG in lexer: no states to pop")
|
|
||||||
}
|
|
||||||
last := lx.stack[len(lx.stack)-1]
|
|
||||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
|
||||||
return last
|
|
||||||
}
|
|
||||||
|
|
||||||
// current returns the text of the token accumulated so far
// (from start up to, but not including, pos).
func (lx *lexer) current() string {
	return lx.input[lx.start:lx.pos]
}
|
|
||||||
|
|
||||||
// emit sends the accumulated text as a token of the given type and resets
// the token start position.
func (lx *lexer) emit(typ itemType) {
	lx.items <- item{typ, lx.current(), lx.line}
	lx.start = lx.pos
}
|
|
||||||
|
|
||||||
// emitTrim is like emit but strips leading/trailing whitespace from the
// token text first.
func (lx *lexer) emitTrim(typ itemType) {
	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
	lx.start = lx.pos
}
|
|
||||||
|
|
||||||
// next consumes and returns the next rune, maintaining the line counter and
// the three-rune backup history. Returns eof at end of input; calling next
// again after eof is a lexer bug and panics.
func (lx *lexer) next() (r rune) {
	if lx.atEOF {
		panic("next called after EOF")
	}
	if lx.pos >= len(lx.input) {
		lx.atEOF = true
		return eof
	}

	if lx.input[lx.pos] == '\n' {
		lx.line++
	}
	// Shift the rune-width history before recording the new width.
	lx.prevWidths[2] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[0]
	if lx.nprev < 3 {
		lx.nprev++
	}
	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
	lx.prevWidths[0] = w
	lx.pos += w
	return r
}
|
|
||||||
|
|
||||||
// ignore skips over the pending input before this point.
func (lx *lexer) ignore() {
	lx.start = lx.pos
}
|
|
||||||
|
|
||||||
// backup steps back one rune. Can be called only twice between calls to next.
func (lx *lexer) backup() {
	if lx.atEOF {
		// eof consumed no input; just clear the flag.
		lx.atEOF = false
		return
	}
	if lx.nprev < 1 {
		panic("backed up too far")
	}
	// Unshift the rune-width history (inverse of next()).
	w := lx.prevWidths[0]
	lx.prevWidths[0] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[2]
	lx.nprev--
	lx.pos -= w
	// Un-count the newline if we just stepped back over one.
	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
		lx.line--
	}
}
|
|
||||||
|
|
||||||
// accept consumes the next rune if it's equal to `valid`.
|
|
||||||
func (lx *lexer) accept(valid rune) bool {
|
|
||||||
if lx.next() == valid {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
	r := lx.next()
	lx.backup()
	return r
}
|
|
||||||
|
|
||||||
// skip ignores all input that matches the given predicate, leaving the
// token start at the first non-matching rune.
func (lx *lexer) skip(pred func(rune) bool) {
	for {
		r := lx.next()
		if pred(r) {
			continue
		}
		lx.backup()
		lx.ignore()
		return
	}
}
|
|
||||||
|
|
||||||
// errorf stops all lexing by emitting an error and returning `nil`.
// Note that any value that is a character is escaped if it's a special
// character (newlines, tabs, etc.).
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
	lx.items <- item{
		itemError,
		fmt.Sprintf(format, values...),
		lx.line,
	}
	return nil
}
|
|
||||||
|
|
||||||
// lexTop consumes elements at the top level of TOML data: comments, table
// headers, keys, or EOF.
func lexTop(lx *lexer) stateFn {
	r := lx.next()
	if isWhitespace(r) || isNL(r) {
		return lexSkip(lx, lexTop)
	}
	switch r {
	case commentStart:
		lx.push(lexTop)
		return lexCommentStart
	case tableStart:
		return lexTableStart
	case eof:
		if lx.pos > lx.start {
			return lx.errorf("unexpected EOF")
		}
		lx.emit(itemEOF)
		return nil
	}

	// At this point, the only valid item can be a key, so we back up
	// and let the key lexer do the rest.
	lx.backup()
	lx.push(lexTopEnd)
	return lexKeyStart
}
|
|
||||||
|
|
||||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == commentStart:
		// a comment will read to a newline for us.
		lx.push(lexTop)
		return lexCommentStart
	case isWhitespace(r):
		return lexTopEnd
	case isNL(r):
		lx.ignore()
		return lexTop
	case r == eof:
		lx.emit(itemEOF)
		return nil
	}
	return lx.errorf("expected a top-level item to end with a newline, "+
		"comment, or EOF, but got %q instead", r)
}
|
|
||||||
|
|
||||||
// lexTableStart lexes the beginning of a table. Namely, it makes sure that
// it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables.
// e.g., '[[name]]'.
func lexTableStart(lx *lexer) stateFn {
	if lx.peek() == arrayTableStart {
		// A second '[' means this is an array-of-tables header.
		lx.next()
		lx.emit(itemArrayTableStart)
		lx.push(lexArrayTableEnd)
	} else {
		lx.emit(itemTableStart)
		lx.push(lexTableEnd)
	}
	return lexTableNameStart
}
|
|
||||||
|
|
||||||
// lexTableEnd emits the end of a '[name]' table header. The closing ']'
// has already been consumed by lexTableNameEnd.
func lexTableEnd(lx *lexer) stateFn {
	lx.emit(itemTableEnd)
	return lexTopEnd
}
|
|
||||||
|
|
||||||
// lexArrayTableEnd consumes the second ']' of a '[[name]]' header and emits
// the end of the array-of-tables item.
func lexArrayTableEnd(lx *lexer) stateFn {
	if r := lx.next(); r != arrayTableEnd {
		return lx.errorf("expected end of table array name delimiter %q, "+
			"but got %q instead", arrayTableEnd, r)
	}
	lx.emit(itemArrayTableEnd)
	return lexTopEnd
}
|
|
||||||
|
|
||||||
// lexTableNameStart lexes one dotted component of a table name, which may
// be bare or a (raw) quoted string. Empty components are rejected.
func lexTableNameStart(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.peek(); {
	case r == tableEnd || r == eof:
		return lx.errorf("unexpected end of table name " +
			"(table names cannot be empty)")
	case r == tableSep:
		return lx.errorf("unexpected table separator " +
			"(table names cannot be empty)")
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.push(lexTableNameEnd)
		return lexValue // reuse string lexing
	default:
		return lexBareTableName
	}
}
|
|
||||||
|
|
||||||
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read.
func lexBareTableName(lx *lexer) stateFn {
	r := lx.next()
	if isBareKeyChar(r) {
		return lexBareTableName
	}
	// First non-key character terminates the name; put it back and emit.
	lx.backup()
	lx.emit(itemText)
	return lexTableNameEnd
}
|
|
||||||
|
|
||||||
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace. A '.' continues with the next name component; a ']'
// returns to the pushed table-end state.
func lexTableNameEnd(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.next(); {
	case isWhitespace(r):
		return lexTableNameEnd
	case r == tableSep:
		lx.ignore()
		return lexTableNameStart
	case r == tableEnd:
		return lx.pop()
	default:
		return lx.errorf("expected '.' or ']' to end table name, "+
			"but got %q instead", r)
	}
}
|
|
||||||
|
|
||||||
// lexKeyStart consumes a key name up until the first non-whitespace character.
// lexKeyStart will ignore whitespace. Keys may be bare or (raw) quoted
// strings.
func lexKeyStart(lx *lexer) stateFn {
	r := lx.peek()
	switch {
	case r == keySep:
		return lx.errorf("unexpected key separator %q", keySep)
	case isWhitespace(r) || isNL(r):
		lx.next()
		return lexSkip(lx, lexKeyStart)
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.emit(itemKeyStart)
		lx.push(lexKeyEnd)
		return lexValue // reuse string lexing
	default:
		lx.ignore()
		lx.emit(itemKeyStart)
		return lexBareKey
	}
}
|
|
||||||
|
|
||||||
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed. The key ends at
// whitespace or the '=' separator.
func lexBareKey(lx *lexer) stateFn {
	switch r := lx.next(); {
	case isBareKeyChar(r):
		return lexBareKey
	case isWhitespace(r):
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	case r == keySep:
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	default:
		return lx.errorf("bare keys cannot contain %q", r)
	}
}
|
|
||||||
|
|
||||||
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator).
func lexKeyEnd(lx *lexer) stateFn {
	switch r := lx.next(); {
	case r == keySep:
		return lexSkip(lx, lexValue)
	case isWhitespace(r):
		return lexSkip(lx, lexKeyEnd)
	default:
		return lx.errorf("expected key separator %q, but got %q instead",
			keySep, r)
	}
}
|
|
||||||
|
|
||||||
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
	// We allow whitespace to precede a value, but NOT newlines.
	// In array syntax, the array states are responsible for ignoring newlines.
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexValue)
	case isDigit(r):
		lx.backup() // avoid an extra state and use the same as above
		return lexNumberOrDateStart
	}
	switch r {
	case arrayStart:
		lx.ignore()
		lx.emit(itemArray)
		return lexArrayValue
	case inlineTableStart:
		lx.ignore()
		lx.emit(itemInlineTableStart)
		return lexInlineTableValue
	case stringStart:
		// One, two, or three '"' runes: "", start of basic string, or """.
		if lx.accept(stringStart) {
			if lx.accept(stringStart) {
				lx.ignore() // Ignore """
				return lexMultilineString
			}
			lx.backup()
		}
		lx.ignore() // ignore the '"'
		return lexString
	case rawStringStart:
		// Same three-way split for single quotes.
		if lx.accept(rawStringStart) {
			if lx.accept(rawStringStart) {
				lx.ignore() // Ignore '''
				return lexMultilineRawString
			}
			lx.backup()
		}
		lx.ignore() // ignore the "'"
		return lexRawString
	case '+', '-':
		return lexNumberStart
	case '.': // special error case, be kind to users
		return lx.errorf("floats must start with a digit, not '.'")
	}
	if unicode.IsLetter(r) {
		// Be permissive here; lexBool will give a nice error if the
		// user wrote something like
		//   x = foo
		// (i.e. not 'true' or 'false' but is something else word-like.)
		lx.backup()
		return lexBool
	}
	return lx.errorf("expected value but found %q instead", r)
}
|
|
||||||
|
|
||||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
// have already been consumed. All whitespace and newlines are ignored.
func lexArrayValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValue)
	case r == commentStart:
		lx.push(lexArrayValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == arrayEnd:
		// NOTE(caleb): The spec isn't clear about whether you can have
		// a trailing comma or not, so we'll allow it.
		return lexArrayEnd
	}

	// Anything else is a value; put it back and lex it generically.
	lx.backup()
	lx.push(lexArrayValueEnd)
	return lexValue
}
|
|
||||||
|
|
||||||
// lexArrayValueEnd consumes everything between the end of an array value and
// the next value (or the end of the array): it ignores whitespace and newlines
// and expects either a ',' or a ']'.
func lexArrayValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r) || isNL(r):
		return lexSkip(lx, lexArrayValueEnd)
	case r == commentStart:
		lx.push(lexArrayValueEnd)
		return lexCommentStart
	case r == comma:
		lx.ignore()
		return lexArrayValue // move on to the next value
	case r == arrayEnd:
		return lexArrayEnd
	}
	return lx.errorf(
		"expected a comma or array terminator %q, but got %q instead",
		arrayEnd, r,
	)
}
|
|
||||||
|
|
||||||
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
	lx.ignore()
	lx.emit(itemArrayEnd)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexInlineTableValue consumes one key/value pair in an inline table.
|
|
||||||
// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
|
|
||||||
func lexInlineTableValue(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValue)
	case isNL(r):
		// Inline tables must stay on one line.
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValue)
		return lexCommentStart
	case r == comma:
		return lx.errorf("unexpected comma")
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	// Otherwise this is the start of a key; lex it, then its value.
	lx.backup()
	lx.push(lexInlineTableValueEnd)
	return lexKeyStart
}
|
|
||||||
|
|
||||||
// lexInlineTableValueEnd consumes everything between the end of an inline table
|
|
||||||
// key/value pair and the next pair (or the end of the table):
|
|
||||||
// it ignores whitespace and expects either a ',' or a '}'.
|
|
||||||
func lexInlineTableValueEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexInlineTableValueEnd)
	case isNL(r):
		return lx.errorf("newlines not allowed within inline tables")
	case r == commentStart:
		lx.push(lexInlineTableValueEnd)
		return lexCommentStart
	case r == comma:
		// Separator between key/value pairs; discard it and continue.
		lx.ignore()
		return lexInlineTableValue
	case r == inlineTableEnd:
		return lexInlineTableEnd
	}
	return lx.errorf("expected a comma or an inline table terminator %q, "+
		"but got %q instead", inlineTableEnd, r)
}
|
|
||||||
|
|
||||||
// lexInlineTableEnd finishes the lexing of an inline table.
|
|
||||||
// It assumes that a '}' has just been consumed.
|
|
||||||
func lexInlineTableEnd(lx *lexer) stateFn {
	// Drop the consumed '}' and emit the end marker, then resume the
	// interrupted state.
	lx.ignore()
	lx.emit(itemInlineTableEnd)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexString consumes the inner contents of a string. It assumes that the
|
|
||||||
// beginning '"' has already been consumed and ignored.
|
|
||||||
func lexString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		// Basic (single-line) strings cannot span lines.
		return lx.errorf("strings cannot contain newlines")
	case r == '\\':
		// Handle the escape, then come back to this state.
		lx.push(lexString)
		return lexStringEscape
	case r == stringEnd:
		// Emit everything before the closing quote, then consume and
		// discard the quote itself. The order here matters.
		lx.backup()
		lx.emit(itemString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	// Ordinary character: keep consuming.
	return lexString
}
|
|
||||||
|
|
||||||
// lexMultilineString consumes the inner contents of a string. It assumes that
|
|
||||||
// the beginning '"""' has already been consumed and ignored.
|
|
||||||
func lexMultilineString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case '\\':
		return lexMultilineStringEscape
	case stringEnd:
		// One '"' seen; the string only ends on three in a row. accept()
		// consumes the next rune iff it matches.
		if lx.accept(stringEnd) {
			if lx.accept(stringEnd) {
				// All three quotes consumed: back up over them, emit the
				// body, then re-consume and discard the delimiter. The
				// exact backup/next counts must stay balanced.
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			// Only two quotes: put the second one back and keep going.
			lx.backup()
		}
	}
	return lexMultilineString
}
|
|
||||||
|
|
||||||
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
|
|
||||||
// It assumes that the beginning "'" has already been consumed and ignored.
|
|
||||||
func lexRawString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		// Literal (single-quoted) strings are single-line too.
		return lx.errorf("strings cannot contain newlines")
	case r == rawStringEnd:
		// Emit the body without the closing quote, then discard the quote.
		lx.backup()
		lx.emit(itemRawString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexRawString
}
|
|
||||||
|
|
||||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
|
||||||
// a string. It assumes that the beginning "'''" has already been consumed and
|
|
||||||
// ignored.
|
|
||||||
func lexMultilineRawString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case rawStringEnd:
		// Need three ' in a row to terminate; same balanced backup/next
		// bookkeeping as lexMultilineString.
		if lx.accept(rawStringEnd) {
			if lx.accept(rawStringEnd) {
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemRawMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			// Only two quotes seen: un-consume the second one.
			lx.backup()
		}
	}
	return lexMultilineRawString
}
|
|
||||||
|
|
||||||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
|
|
||||||
// preceding '\\' has already been consumed.
|
|
||||||
func lexMultilineStringEscape(lx *lexer) stateFn {
|
|
||||||
// Handle the special case first:
|
|
||||||
if isNL(lx.next()) {
|
|
||||||
return lexMultilineString
|
|
||||||
}
|
|
||||||
lx.backup()
|
|
||||||
lx.push(lexMultilineString)
|
|
||||||
return lexStringEscape(lx)
|
|
||||||
}
|
|
||||||
|
|
||||||
func lexStringEscape(lx *lexer) stateFn {
|
|
||||||
r := lx.next()
|
|
||||||
switch r {
|
|
||||||
case 'b':
|
|
||||||
fallthrough
|
|
||||||
case 't':
|
|
||||||
fallthrough
|
|
||||||
case 'n':
|
|
||||||
fallthrough
|
|
||||||
case 'f':
|
|
||||||
fallthrough
|
|
||||||
case 'r':
|
|
||||||
fallthrough
|
|
||||||
case '"':
|
|
||||||
fallthrough
|
|
||||||
case '\\':
|
|
||||||
return lx.pop()
|
|
||||||
case 'u':
|
|
||||||
return lexShortUnicodeEscape
|
|
||||||
case 'U':
|
|
||||||
return lexLongUnicodeEscape
|
|
||||||
}
|
|
||||||
return lx.errorf("invalid escape character %q; only the following "+
|
|
||||||
"escape characters are allowed: "+
|
|
||||||
`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexShortUnicodeEscape consumes exactly four hexadecimal digits after a
// '\u' escape; any non-hex rune aborts with an error showing the text
// consumed so far.
func lexShortUnicodeEscape(lx *lexer) stateFn {
	var r rune
	for i := 0; i < 4; i++ {
		r = lx.next()
		if !isHexadecimal(r) {
			return lx.errorf(`expected four hexadecimal digits after '\u', `+
				"but got %q instead", lx.current())
		}
	}
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexLongUnicodeEscape consumes exactly eight hexadecimal digits after a
// '\U' escape; mirrors lexShortUnicodeEscape.
func lexLongUnicodeEscape(lx *lexer) stateFn {
	var r rune
	for i := 0; i < 8; i++ {
		r = lx.next()
		if !isHexadecimal(r) {
			return lx.errorf(`expected eight hexadecimal digits after '\U', `+
				"but got %q instead", lx.current())
		}
	}
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexNumberOrDateStart consumes either an integer, a float, or datetime.
|
|
||||||
func lexNumberOrDateStart(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '_':
		// Leading underscore is accepted lexically; the parser's
		// numUnderscoresOK later rejects it.
		return lexNumber
	case 'e', 'E':
		return lexFloat
	case '.':
		return lx.errorf("floats must start with a digit, not '.'")
	}
	return lx.errorf("expected a digit but got %q", r)
}
|
|
||||||
|
|
||||||
// lexNumberOrDate consumes either an integer, float or datetime.
|
|
||||||
func lexNumberOrDate(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumberOrDate
	}
	switch r {
	case '-':
		// A '-' after leading digits disambiguates: this is a datetime.
		return lexDatetime
	case '_':
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	// None of the above: the number ends here; emit it as an integer.
	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexDatetime consumes a Datetime, to a first approximation.
|
|
||||||
// The parser validates that it matches one of the accepted formats.
|
|
||||||
func lexDatetime(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexDatetime
	}
	switch r {
	// Any datetime punctuation keeps us in this state; validation of the
	// overall format is left to the parser.
	case '-', 'T', ':', '.', 'Z', '+':
		return lexDatetime
	}

	lx.backup()
	lx.emit(itemDatetime)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexNumberStart consumes either an integer or a float. It assumes that a sign
|
|
||||||
// has already been read, but that *no* digits have been consumed.
|
|
||||||
// lexNumberStart will move to the appropriate integer or float states.
|
|
||||||
func lexNumberStart(lx *lexer) stateFn {
|
|
||||||
// We MUST see a digit. Even floats have to start with a digit.
|
|
||||||
r := lx.next()
|
|
||||||
if !isDigit(r) {
|
|
||||||
if r == '.' {
|
|
||||||
return lx.errorf("floats must start with a digit, not '.'")
|
|
||||||
}
|
|
||||||
return lx.errorf("expected a digit but got %q", r)
|
|
||||||
}
|
|
||||||
return lexNumber
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexNumber consumes an integer or a float after seeing the first digit.
|
|
||||||
func lexNumber(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexNumber
	}
	switch r {
	case '_':
		// Underscore separators are kept in the token; the parser checks
		// their placement and strips them.
		return lexNumber
	case '.', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemInteger)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexFloat consumes the elements of a float. It allows any sequence of
|
|
||||||
// float-like characters, so floats emitted by the lexer are only a first
|
|
||||||
// approximation and must be validated by the parser.
|
|
||||||
func lexFloat(lx *lexer) stateFn {
	r := lx.next()
	if isDigit(r) {
		return lexFloat
	}
	switch r {
	// Deliberately permissive; the parser rejects malformed floats.
	case '_', '.', '-', '+', 'e', 'E':
		return lexFloat
	}

	lx.backup()
	lx.emit(itemFloat)
	return lx.pop()
}
|
|
||||||
|
|
||||||
// lexBool consumes a bool string: 'true' or 'false'.
func lexBool(lx *lexer) stateFn {
	// Gather the full run of letters first so the error message can show
	// the whole bad word rather than just its first rune.
	var rs []rune
	for {
		r := lx.next()
		if !unicode.IsLetter(r) {
			lx.backup()
			break
		}
		rs = append(rs, r)
	}
	s := string(rs)
	switch s {
	case "true", "false":
		lx.emit(itemBool)
		return lx.pop()
	}
	return lx.errorf("expected value but found %q instead", s)
}
|
|
||||||
|
|
||||||
// lexCommentStart begins the lexing of a comment. It will emit
|
|
||||||
// itemCommentStart and consume no characters, passing control to lexComment.
|
|
||||||
func lexCommentStart(lx *lexer) stateFn {
	// Discard the '#' itself; the comment text is emitted by lexComment.
	lx.ignore()
	lx.emit(itemCommentStart)
	return lexComment
}
|
|
||||||
|
|
||||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
|
||||||
// It will consume *up to* the first newline character, and pass control
|
|
||||||
// back to the last state on the stack.
|
|
||||||
func lexComment(lx *lexer) stateFn {
|
|
||||||
r := lx.peek()
|
|
||||||
if isNL(r) || r == eof {
|
|
||||||
lx.emit(itemText)
|
|
||||||
return lx.pop()
|
|
||||||
}
|
|
||||||
lx.next()
|
|
||||||
return lexComment
|
|
||||||
}
|
|
||||||
|
|
||||||
// lexSkip ignores all slurped input and moves on to the next state.
|
|
||||||
func lexSkip(lx *lexer, nextState stateFn) stateFn {
	// The closure parameter shadows the outer lx on purpose: it operates
	// on whichever lexer the state machine passes in on the next step.
	return func(lx *lexer) stateFn {
		lx.ignore()
		return nextState
	}
}
|
|
||||||
|
|
||||||
// isWhitespace returns true if `r` is a whitespace character according
|
|
||||||
// to the spec.
|
|
||||||
func isWhitespace(r rune) bool {
	// Only tab and space count; newlines are handled separately by isNL.
	switch r {
	case '\t', ' ':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// isNL reports whether r is a line-break character (LF or CR).
func isNL(r rune) bool {
	switch r {
	case '\n', '\r':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// isDigit reports whether r is an ASCII decimal digit.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}
|
|
||||||
|
|
||||||
// isHexadecimal reports whether r is an ASCII hex digit (either case).
func isHexadecimal(r rune) bool {
	switch {
	case r >= '0' && r <= '9':
		return true
	case r >= 'a' && r <= 'f':
		return true
	case r >= 'A' && r <= 'F':
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
// isBareKeyChar reports whether r may appear in an unquoted TOML key:
// ASCII letters, digits, underscore, or hyphen.
func isBareKeyChar(r rune) bool {
	return r == '_' || r == '-' ||
		(r >= 'A' && r <= 'Z') ||
		(r >= 'a' && r <= 'z') ||
		(r >= '0' && r <= '9')
}
|
|
||||||
|
|
||||||
// String returns a human-readable name for the item type, used in debug
// and bug() messages. Item types without a case here fall through to the
// panic below.
func (itype itemType) String() string {
	switch itype {
	case itemError:
		return "Error"
	case itemNIL:
		return "NIL"
	case itemEOF:
		return "EOF"
	case itemText:
		return "Text"
	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
		return "String"
	case itemBool:
		return "Bool"
	case itemInteger:
		return "Integer"
	case itemFloat:
		return "Float"
	case itemDatetime:
		return "DateTime"
	case itemTableStart:
		return "TableStart"
	case itemTableEnd:
		return "TableEnd"
	case itemKeyStart:
		return "KeyStart"
	case itemArray:
		return "Array"
	case itemArrayEnd:
		return "ArrayEnd"
	case itemCommentStart:
		return "CommentStart"
	}
	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
}
|
|
||||||
|
|
||||||
// String renders an item as "(Type, value)" for debugging output.
func (item item) String() string {
	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
}
|
|
592
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
592
vendor/github.com/BurntSushi/toml/parse.go
generated
vendored
|
@ -1,592 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// parser turns the lexer's item stream into a nested map plus type and
// ordering metadata.
type parser struct {
	// The decoded document: nested map[string]interface{} values.
	mapping map[string]interface{}
	// Type of the value at each 'key.group.name', for strictness checks.
	types map[string]tomlType
	// Source of items being parsed.
	lx *lexer

	// A list of keys in the order that they appear in the TOML data.
	ordered []Key

	// the full key for the current hash in scope
	context Key

	// the base key name for everything except hashes
	currentKey string

	// rough approximation of line number
	approxLine int

	// A map of 'key.group.names' to whether they were created implicitly.
	implicits map[string]bool
}
|
|
||||||
|
|
||||||
// parseError is the panic payload used internally to abort parsing; it is
// recovered and returned as an ordinary error by parse().
type parseError string

func (pe parseError) Error() string {
	return string(pe)
}
|
|
||||||
|
|
||||||
// parse lexes and parses a complete TOML document, returning the populated
// parser (mapping, types, key order) or an error.
func parse(data string) (p *parser, err error) {
	// The parser reports user errors by panicking with a parseError;
	// translate those back into the error return. Anything else is a
	// genuine bug and is re-raised.
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(parseError); ok {
				return
			}
			panic(r)
		}
	}()

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]bool),
	}
	// Drain the lexer, dispatching each top-level construct.
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}
|
|
||||||
|
|
||||||
// panicf aborts parsing with a user-facing parseError that includes the
// approximate line and the key being parsed; recovered in parse().
func (p *parser) panicf(format string, v ...interface{}) {
	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
		p.approxLine, p.current(), fmt.Sprintf(format, v...))
	panic(parseError(msg))
}
|
|
||||||
|
|
||||||
// next pulls the next item from the lexer, converting lexer errors into a
// parse panic so callers never see itemError.
func (p *parser) next() item {
	it := p.lx.nextItem()
	if it.typ == itemError {
		p.panicf("%s", it.val)
	}
	return it
}
|
|
||||||
|
|
||||||
// bug panics with a plain string (not a parseError), so it is NOT caught
// by parse()'s recover — internal invariant violations crash loudly.
func (p *parser) bug(format string, v ...interface{}) {
	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
|
|
||||||
|
|
||||||
// expect consumes the next item and asserts (via bug) that it has the
// given type, returning it.
func (p *parser) expect(typ itemType) item {
	it := p.next()
	p.assertEqual(typ, it.typ)
	return it
}
|
|
||||||
|
|
||||||
// assertEqual reports an internal bug if the two item types differ.
func (p *parser) assertEqual(expected, got itemType) {
	if expected != got {
		p.bug("Expected '%s' but got '%s'.", expected, got)
	}
}
|
|
||||||
|
|
||||||
// topLevel dispatches one top-level item: a comment, a [table] header, an
// [[array-of-tables]] header, or a key = value pair.
func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart:
		p.approxLine = item.line
		p.expect(itemText)
	case itemTableStart:
		kg := p.next()
		p.approxLine = kg.line

		// Collect the (possibly dotted) table name parts up to ']'.
		var key Key
		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemTableEnd, kg.typ)

		p.establishContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart:
		kg := p.next()
		p.approxLine = kg.line

		// Same as above but for [[...]] array-of-tables headers.
		var key Key
		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemArrayTableEnd, kg.typ)

		p.establishContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart:
		kname := p.next()
		p.approxLine = kname.line
		p.currentKey = p.keyString(kname)

		// Decode the value, then record it plus its type and ordering.
		val, typ := p.value(p.next())
		p.setValue(p.currentKey, val)
		p.setType(p.currentKey, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}
|
|
||||||
|
|
||||||
// Gets a string for a key (or part of a key in a table name).
|
|
||||||
func (p *parser) keyString(it item) string {
	switch it.typ {
	case itemText:
		// Bare keys are used verbatim.
		return it.val
	case itemString, itemMultilineString,
		itemRawString, itemRawMultilineString:
		// Quoted keys go through value() so escapes are decoded.
		s, _ := p.value(it)
		return s.(string)
	default:
		p.bug("Unexpected key type: %s", it.typ)
		panic("unreachable")
	}
}
|
|
||||||
|
|
||||||
// value translates an expected value from the lexer into a Go value wrapped
|
|
||||||
// as an empty interface.
|
|
||||||
// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface. Strings become string, bools bool, integers int64,
// floats float64, datetimes time.Time, arrays []interface{}, and inline
// tables map[string]interface{}.
func (p *parser) value(it item) (interface{}, tomlType) {
	switch it.typ {
	case itemString:
		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
	case itemMultilineString:
		// Strip a leading newline and backslash-newline continuations
		// before decoding escapes.
		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
	case itemRawString:
		return it.val, p.typeOfPrimitive(it)
	case itemRawMultilineString:
		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
	case itemBool:
		switch it.val {
		case "true":
			return true, p.typeOfPrimitive(it)
		case "false":
			return false, p.typeOfPrimitive(it)
		}
		p.bug("Expected boolean value, but got '%s'.", it.val)
	case itemInteger:
		if !numUnderscoresOK(it.val) {
			p.panicf("Invalid integer %q: underscores must be surrounded by digits",
				it.val)
		}
		val := strings.Replace(it.val, "_", "", -1)
		num, err := strconv.ParseInt(val, 10, 64)
		if err != nil {
			// Distinguish integer values. Normally, it'd be a bug if the lexer
			// provides an invalid integer, but it's possible that the number is
			// out of range of valid values (which the lexer cannot determine).
			// So mark the former as a bug but the latter as a legitimate user
			// error.
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Integer '%s' is out of the range of 64-bit "+
					"signed integers.", it.val)
			} else {
				p.bug("Expected integer value, but got '%s'.", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemFloat:
		// Check underscore placement in each digit run separately.
		parts := strings.FieldsFunc(it.val, func(r rune) bool {
			switch r {
			case '.', 'e', 'E':
				return true
			}
			return false
		})
		for _, part := range parts {
			if !numUnderscoresOK(part) {
				p.panicf("Invalid float %q: underscores must be "+
					"surrounded by digits", it.val)
			}
		}
		if !numPeriodsOK(it.val) {
			// As a special case, numbers like '123.' or '1.e2',
			// which are valid as far as Go/strconv are concerned,
			// must be rejected because TOML says that a fractional
			// part consists of '.' followed by 1+ digits.
			p.panicf("Invalid float %q: '.' must be followed "+
				"by one or more digits", it.val)
		}
		val := strings.Replace(it.val, "_", "", -1)
		num, err := strconv.ParseFloat(val, 64)
		if err != nil {
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Float '%s' is out of the range of 64-bit "+
					"IEEE-754 floating-point numbers.", it.val)
			} else {
				p.panicf("Invalid float value: %q", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemDatetime:
		// Try the accepted formats from most to least specific.
		var t time.Time
		var ok bool
		var err error
		for _, format := range []string{
			"2006-01-02T15:04:05Z07:00",
			"2006-01-02T15:04:05",
			"2006-01-02",
		} {
			t, err = time.ParseInLocation(format, it.val, time.Local)
			if err == nil {
				ok = true
				break
			}
		}
		if !ok {
			p.panicf("Invalid TOML Datetime: %q.", it.val)
		}
		return t, p.typeOfPrimitive(it)
	case itemArray:
		array := make([]interface{}, 0)
		types := make([]tomlType, 0)

		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}

			val, typ := p.value(it)
			array = append(array, val)
			types = append(types, typ)
		}
		return array, p.typeOfArray(types)
	case itemInlineTableStart:
		var (
			hash         = make(map[string]interface{})
			outerContext = p.context
			outerKey     = p.currentKey
		)

		// Temporarily descend into the inline table's context; restored
		// below before returning.
		p.context = append(p.context, p.currentKey)
		p.currentKey = ""
		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
			if it.typ != itemKeyStart {
				p.bug("Expected key start but instead found %q, around line %d",
					it.val, p.approxLine)
			}
			// NOTE(review): this comment branch is unreachable — any
			// non-itemKeyStart item (including itemCommentStart) already
			// hit the bug() above. The lexer forbids newlines in inline
			// tables, so comments can't occur here anyway.
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}

			// retrieve key
			k := p.next()
			p.approxLine = k.line
			kname := p.keyString(k)

			// retrieve value
			p.currentKey = kname
			val, typ := p.value(p.next())
			// make sure we keep metadata up to date
			p.setType(kname, typ)
			p.ordered = append(p.ordered, p.context.add(p.currentKey))
			hash[kname] = val
		}
		p.context = outerContext
		p.currentKey = outerKey
		return hash, tomlHash
	}
	p.bug("Unexpected value type: %s", it.typ)
	panic("unreachable")
}
|
|
||||||
|
|
||||||
// numUnderscoresOK checks whether each underscore in s is surrounded by
|
|
||||||
// characters that are not underscores.
|
|
||||||
// numUnderscoresOK reports whether every underscore in s sits between two
// non-underscore characters (and neither starts nor ends the string).
func numUnderscoresOK(s string) bool {
	// prevOK is true when the previous rune exists and was not '_'.
	prevOK := false
	for _, r := range s {
		if r != '_' {
			prevOK = true
			continue
		}
		if !prevOK {
			return false // leading or doubled underscore
		}
		prevOK = false
	}
	// A trailing underscore (or empty string) leaves prevOK false.
	return prevOK
}
|
|
||||||
|
|
||||||
// numPeriodsOK checks whether every period in s is followed by a digit.
|
|
||||||
// numPeriodsOK reports whether every period in s is immediately followed
// by a decimal digit (and s does not end in a period).
func numPeriodsOK(s string) bool {
	afterPeriod := false
	for _, r := range s {
		// Digit check inlined here (was the isDigit helper).
		if afterPeriod && (r < '0' || r > '9') {
			return false
		}
		afterPeriod = r == '.'
	}
	return !afterPeriod
}
|
|
||||||
|
|
||||||
// establishContext sets the current context of the parser,
|
|
||||||
// where the context is either a hash or an array of hashes. Which one is
|
|
||||||
// set depends on the value of the `array` parameter.
|
|
||||||
//
|
|
||||||
// Establishing the context also makes sure that the key isn't a duplicate, and
|
|
||||||
// will create implicit hashes automatically.
|
|
||||||
func (p *parser) establishContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			// A scalar already lives at this prefix: duplicate key.
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 5)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as "+
				"an array.", keyContext)
		}
	} else {
		// Plain table: setValue also enforces no-duplicate rules.
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	// Include the final key segment in the active context.
	p.context = append(p.context, key[len(key)-1])
}
|
|
||||||
|
|
||||||
// setValue sets the given key to the given value in the current context.
|
|
||||||
// It will make sure that the key hasn't already been defined, account for
|
|
||||||
// implicit key groups.
|
|
||||||
func (p *parser) setValue(key string, value interface{}) {
	var tmpHash interface{}
	var ok bool

	// Walk down p.context to find the hash this key belongs in.
	hash := p.mapping
	keyContext := make(Key, 0)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.bug("Expected hash to have type 'map[string]interface{}', but "+
				"it has '%T' instead.", tmpHash)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Typically, if the given key has already been set, then we have
		// to raise an error since duplicate keys are disallowed. However,
		// it's possible that a key was previously defined implicitly. In this
		// case, it is allowed to be redefined concretely. (See the
		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
		//
		// But we have to make sure to stop marking it as an implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}
	hash[key] = value
}
|
|
||||||
|
|
||||||
// setType sets the type of a particular value at a given key.
|
|
||||||
// It should be called immediately AFTER setValue.
|
|
||||||
//
|
|
||||||
// Note that if `key` is empty, then the type given will be applied to the
|
|
||||||
// current context (which is either a table or an array of tables).
|
|
||||||
func (p *parser) setType(key string, typ tomlType) {
	// Copy the context so later context mutations don't alias this key.
	keyContext := make(Key, 0, len(p.context)+1)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
	}
	if len(key) > 0 { // allow type setting for hashes
		keyContext = append(keyContext, key)
	}
	p.types[keyContext.String()] = typ
}
|
|
||||||
|
|
||||||
// addImplicit sets the given Key as having been created implicitly.
|
|
||||||
// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
	p.implicits[key.String()] = true
}
|
|
||||||
|
|
||||||
// removeImplicit stops tagging the given key as having been implicitly
|
|
||||||
// created.
|
|
||||||
// removeImplicit stops tagging the given key as having been implicitly
// created (sets the flag false rather than deleting the entry).
func (p *parser) removeImplicit(key Key) {
	p.implicits[key.String()] = false
}
|
|
||||||
|
|
||||||
// isImplicit returns true if the key group pointed to by the key was created
|
|
||||||
// implicitly.
|
|
||||||
// isImplicit returns true if the key group pointed to by the key was
// created implicitly. Missing keys read as false (map zero value).
func (p *parser) isImplicit(key Key) bool {
	return p.implicits[key.String()]
}
|
|
||||||
|
|
||||||
// current returns the full key name of the current context.
|
|
||||||
func (p *parser) current() string {
|
|
||||||
if len(p.currentKey) == 0 {
|
|
||||||
return p.context.String()
|
|
||||||
}
|
|
||||||
if len(p.context) == 0 {
|
|
||||||
return p.currentKey
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s.%s", p.context, p.currentKey)
|
|
||||||
}
|
|
||||||
|
|
||||||
func stripFirstNewline(s string) string {
|
|
||||||
if len(s) == 0 || s[0] != '\n' {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
return s[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
func stripEscapedWhitespace(s string) string {
|
|
||||||
esc := strings.Split(s, "\\\n")
|
|
||||||
if len(esc) > 1 {
|
|
||||||
for i := 1; i < len(esc); i++ {
|
|
||||||
esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return strings.Join(esc, "")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) replaceEscapes(str string) string {
|
|
||||||
var replaced []rune
|
|
||||||
s := []byte(str)
|
|
||||||
r := 0
|
|
||||||
for r < len(s) {
|
|
||||||
if s[r] != '\\' {
|
|
||||||
c, size := utf8.DecodeRune(s[r:])
|
|
||||||
r += size
|
|
||||||
replaced = append(replaced, c)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
r += 1
|
|
||||||
if r >= len(s) {
|
|
||||||
p.bug("Escape sequence at end of string.")
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
switch s[r] {
|
|
||||||
default:
|
|
||||||
p.bug("Expected valid escape code after \\, but got %q.", s[r])
|
|
||||||
return ""
|
|
||||||
case 'b':
|
|
||||||
replaced = append(replaced, rune(0x0008))
|
|
||||||
r += 1
|
|
||||||
case 't':
|
|
||||||
replaced = append(replaced, rune(0x0009))
|
|
||||||
r += 1
|
|
||||||
case 'n':
|
|
||||||
replaced = append(replaced, rune(0x000A))
|
|
||||||
r += 1
|
|
||||||
case 'f':
|
|
||||||
replaced = append(replaced, rune(0x000C))
|
|
||||||
r += 1
|
|
||||||
case 'r':
|
|
||||||
replaced = append(replaced, rune(0x000D))
|
|
||||||
r += 1
|
|
||||||
case '"':
|
|
||||||
replaced = append(replaced, rune(0x0022))
|
|
||||||
r += 1
|
|
||||||
case '\\':
|
|
||||||
replaced = append(replaced, rune(0x005C))
|
|
||||||
r += 1
|
|
||||||
case 'u':
|
|
||||||
// At this point, we know we have a Unicode escape of the form
|
|
||||||
// `uXXXX` at [r, r+5). (Because the lexer guarantees this
|
|
||||||
// for us.)
|
|
||||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
|
|
||||||
replaced = append(replaced, escaped)
|
|
||||||
r += 5
|
|
||||||
case 'U':
|
|
||||||
// At this point, we know we have a Unicode escape of the form
|
|
||||||
// `uXXXX` at [r, r+9). (Because the lexer guarantees this
|
|
||||||
// for us.)
|
|
||||||
escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
|
|
||||||
replaced = append(replaced, escaped)
|
|
||||||
r += 9
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(replaced)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
|
|
||||||
s := string(bs)
|
|
||||||
hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
|
|
||||||
if err != nil {
|
|
||||||
p.bug("Could not parse '%s' as a hexadecimal number, but the "+
|
|
||||||
"lexer claims it's OK: %s", s, err)
|
|
||||||
}
|
|
||||||
if !utf8.ValidRune(rune(hex)) {
|
|
||||||
p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
|
|
||||||
}
|
|
||||||
return rune(hex)
|
|
||||||
}
|
|
||||||
|
|
||||||
func isStringType(ty itemType) bool {
|
|
||||||
return ty == itemString || ty == itemMultilineString ||
|
|
||||||
ty == itemRawString || ty == itemRawMultilineString
|
|
||||||
}
|
|
1
vendor/github.com/BurntSushi/toml/session.vim
generated
vendored
1
vendor/github.com/BurntSushi/toml/session.vim
generated
vendored
|
@ -1 +0,0 @@
|
||||||
au BufWritePost *.go silent!make tags > /dev/null 2>&1
|
|
91
vendor/github.com/BurntSushi/toml/type_check.go
generated
vendored
91
vendor/github.com/BurntSushi/toml/type_check.go
generated
vendored
|
@ -1,91 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
// tomlType represents any Go type that corresponds to a TOML type.
|
|
||||||
// While the first draft of the TOML spec has a simplistic type system that
|
|
||||||
// probably doesn't need this level of sophistication, we seem to be militating
|
|
||||||
// toward adding real composite types.
|
|
||||||
type tomlType interface {
|
|
||||||
typeString() string
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeEqual accepts any two types and returns true if they are equal.
|
|
||||||
func typeEqual(t1, t2 tomlType) bool {
|
|
||||||
if t1 == nil || t2 == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return t1.typeString() == t2.typeString()
|
|
||||||
}
|
|
||||||
|
|
||||||
func typeIsHash(t tomlType) bool {
|
|
||||||
return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
|
|
||||||
}
|
|
||||||
|
|
||||||
type tomlBaseType string
|
|
||||||
|
|
||||||
func (btype tomlBaseType) typeString() string {
|
|
||||||
return string(btype)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (btype tomlBaseType) String() string {
|
|
||||||
return btype.typeString()
|
|
||||||
}
|
|
||||||
|
|
||||||
var (
|
|
||||||
tomlInteger tomlBaseType = "Integer"
|
|
||||||
tomlFloat tomlBaseType = "Float"
|
|
||||||
tomlDatetime tomlBaseType = "Datetime"
|
|
||||||
tomlString tomlBaseType = "String"
|
|
||||||
tomlBool tomlBaseType = "Bool"
|
|
||||||
tomlArray tomlBaseType = "Array"
|
|
||||||
tomlHash tomlBaseType = "Hash"
|
|
||||||
tomlArrayHash tomlBaseType = "ArrayHash"
|
|
||||||
)
|
|
||||||
|
|
||||||
// typeOfPrimitive returns a tomlType of any primitive value in TOML.
|
|
||||||
// Primitive values are: Integer, Float, Datetime, String and Bool.
|
|
||||||
//
|
|
||||||
// Passing a lexer item other than the following will cause a BUG message
|
|
||||||
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
|
|
||||||
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
|
|
||||||
switch lexItem.typ {
|
|
||||||
case itemInteger:
|
|
||||||
return tomlInteger
|
|
||||||
case itemFloat:
|
|
||||||
return tomlFloat
|
|
||||||
case itemDatetime:
|
|
||||||
return tomlDatetime
|
|
||||||
case itemString:
|
|
||||||
return tomlString
|
|
||||||
case itemMultilineString:
|
|
||||||
return tomlString
|
|
||||||
case itemRawString:
|
|
||||||
return tomlString
|
|
||||||
case itemRawMultilineString:
|
|
||||||
return tomlString
|
|
||||||
case itemBool:
|
|
||||||
return tomlBool
|
|
||||||
}
|
|
||||||
p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
|
|
||||||
panic("unreachable")
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeOfArray returns a tomlType for an array given a list of types of its
|
|
||||||
// values.
|
|
||||||
//
|
|
||||||
// In the current spec, if an array is homogeneous, then its type is always
|
|
||||||
// "Array". If the array is not homogeneous, an error is generated.
|
|
||||||
func (p *parser) typeOfArray(types []tomlType) tomlType {
|
|
||||||
// Empty arrays are cool.
|
|
||||||
if len(types) == 0 {
|
|
||||||
return tomlArray
|
|
||||||
}
|
|
||||||
|
|
||||||
theType := types[0]
|
|
||||||
for _, t := range types[1:] {
|
|
||||||
if !typeEqual(theType, t) {
|
|
||||||
p.panicf("Array contains values of type '%s' and '%s', but "+
|
|
||||||
"arrays must be homogeneous.", theType, t)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return tomlArray
|
|
||||||
}
|
|
242
vendor/github.com/BurntSushi/toml/type_fields.go
generated
vendored
242
vendor/github.com/BurntSushi/toml/type_fields.go
generated
vendored
|
@ -1,242 +0,0 @@
|
||||||
package toml
|
|
||||||
|
|
||||||
// Struct field handling is adapted from code in encoding/json:
|
|
||||||
//
|
|
||||||
// Copyright 2010 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the Go distribution.
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A field represents a single field found in a struct.
|
|
||||||
type field struct {
|
|
||||||
name string // the name of the field (`toml` tag included)
|
|
||||||
tag bool // whether field has a `toml` tag
|
|
||||||
index []int // represents the depth of an anonymous field
|
|
||||||
typ reflect.Type // the type of the field
|
|
||||||
}
|
|
||||||
|
|
||||||
// byName sorts field by name, breaking ties with depth,
|
|
||||||
// then breaking ties with "name came from toml tag", then
|
|
||||||
// breaking ties with index sequence.
|
|
||||||
type byName []field
|
|
||||||
|
|
||||||
func (x byName) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byName) Less(i, j int) bool {
|
|
||||||
if x[i].name != x[j].name {
|
|
||||||
return x[i].name < x[j].name
|
|
||||||
}
|
|
||||||
if len(x[i].index) != len(x[j].index) {
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
if x[i].tag != x[j].tag {
|
|
||||||
return x[i].tag
|
|
||||||
}
|
|
||||||
return byIndex(x).Less(i, j)
|
|
||||||
}
|
|
||||||
|
|
||||||
// byIndex sorts field by index sequence.
|
|
||||||
type byIndex []field
|
|
||||||
|
|
||||||
func (x byIndex) Len() int { return len(x) }
|
|
||||||
|
|
||||||
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
|
|
||||||
|
|
||||||
func (x byIndex) Less(i, j int) bool {
|
|
||||||
for k, xik := range x[i].index {
|
|
||||||
if k >= len(x[j].index) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if xik != x[j].index[k] {
|
|
||||||
return xik < x[j].index[k]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(x[i].index) < len(x[j].index)
|
|
||||||
}
|
|
||||||
|
|
||||||
// typeFields returns a list of fields that TOML should recognize for the given
|
|
||||||
// type. The algorithm is breadth-first search over the set of structs to
|
|
||||||
// include - the top struct and then any reachable anonymous structs.
|
|
||||||
func typeFields(t reflect.Type) []field {
|
|
||||||
// Anonymous fields to explore at the current level and the next.
|
|
||||||
current := []field{}
|
|
||||||
next := []field{{typ: t}}
|
|
||||||
|
|
||||||
// Count of queued names for current level and the next.
|
|
||||||
count := map[reflect.Type]int{}
|
|
||||||
nextCount := map[reflect.Type]int{}
|
|
||||||
|
|
||||||
// Types already visited at an earlier level.
|
|
||||||
visited := map[reflect.Type]bool{}
|
|
||||||
|
|
||||||
// Fields found.
|
|
||||||
var fields []field
|
|
||||||
|
|
||||||
for len(next) > 0 {
|
|
||||||
current, next = next, current[:0]
|
|
||||||
count, nextCount = nextCount, map[reflect.Type]int{}
|
|
||||||
|
|
||||||
for _, f := range current {
|
|
||||||
if visited[f.typ] {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
visited[f.typ] = true
|
|
||||||
|
|
||||||
// Scan f.typ for fields to include.
|
|
||||||
for i := 0; i < f.typ.NumField(); i++ {
|
|
||||||
sf := f.typ.Field(i)
|
|
||||||
if sf.PkgPath != "" && !sf.Anonymous { // unexported
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
opts := getOptions(sf.Tag)
|
|
||||||
if opts.skip {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
index := make([]int, len(f.index)+1)
|
|
||||||
copy(index, f.index)
|
|
||||||
index[len(f.index)] = i
|
|
||||||
|
|
||||||
ft := sf.Type
|
|
||||||
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
|
|
||||||
// Follow pointer.
|
|
||||||
ft = ft.Elem()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record found field and index sequence.
|
|
||||||
if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
|
|
||||||
tagged := opts.name != ""
|
|
||||||
name := opts.name
|
|
||||||
if name == "" {
|
|
||||||
name = sf.Name
|
|
||||||
}
|
|
||||||
fields = append(fields, field{name, tagged, index, ft})
|
|
||||||
if count[f.typ] > 1 {
|
|
||||||
// If there were multiple instances, add a second,
|
|
||||||
// so that the annihilation code will see a duplicate.
|
|
||||||
// It only cares about the distinction between 1 or 2,
|
|
||||||
// so don't bother generating any more copies.
|
|
||||||
fields = append(fields, fields[len(fields)-1])
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record new anonymous struct to explore in next round.
|
|
||||||
nextCount[ft]++
|
|
||||||
if nextCount[ft] == 1 {
|
|
||||||
f := field{name: ft.Name(), index: index, typ: ft}
|
|
||||||
next = append(next, f)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sort.Sort(byName(fields))
|
|
||||||
|
|
||||||
// Delete all fields that are hidden by the Go rules for embedded fields,
|
|
||||||
// except that fields with TOML tags are promoted.
|
|
||||||
|
|
||||||
// The fields are sorted in primary order of name, secondary order
|
|
||||||
// of field index length. Loop over names; for each name, delete
|
|
||||||
// hidden fields by choosing the one dominant field that survives.
|
|
||||||
out := fields[:0]
|
|
||||||
for advance, i := 0, 0; i < len(fields); i += advance {
|
|
||||||
// One iteration per name.
|
|
||||||
// Find the sequence of fields with the name of this first field.
|
|
||||||
fi := fields[i]
|
|
||||||
name := fi.name
|
|
||||||
for advance = 1; i+advance < len(fields); advance++ {
|
|
||||||
fj := fields[i+advance]
|
|
||||||
if fj.name != name {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if advance == 1 { // Only one field with this name
|
|
||||||
out = append(out, fi)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
dominant, ok := dominantField(fields[i : i+advance])
|
|
||||||
if ok {
|
|
||||||
out = append(out, dominant)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fields = out
|
|
||||||
sort.Sort(byIndex(fields))
|
|
||||||
|
|
||||||
return fields
|
|
||||||
}
|
|
||||||
|
|
||||||
// dominantField looks through the fields, all of which are known to
|
|
||||||
// have the same name, to find the single field that dominates the
|
|
||||||
// others using Go's embedding rules, modified by the presence of
|
|
||||||
// TOML tags. If there are multiple top-level fields, the boolean
|
|
||||||
// will be false: This condition is an error in Go and we skip all
|
|
||||||
// the fields.
|
|
||||||
func dominantField(fields []field) (field, bool) {
|
|
||||||
// The fields are sorted in increasing index-length order. The winner
|
|
||||||
// must therefore be one with the shortest index length. Drop all
|
|
||||||
// longer entries, which is easy: just truncate the slice.
|
|
||||||
length := len(fields[0].index)
|
|
||||||
tagged := -1 // Index of first tagged field.
|
|
||||||
for i, f := range fields {
|
|
||||||
if len(f.index) > length {
|
|
||||||
fields = fields[:i]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if f.tag {
|
|
||||||
if tagged >= 0 {
|
|
||||||
// Multiple tagged fields at the same level: conflict.
|
|
||||||
// Return no field.
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
tagged = i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tagged >= 0 {
|
|
||||||
return fields[tagged], true
|
|
||||||
}
|
|
||||||
// All remaining fields have the same length. If there's more than one,
|
|
||||||
// we have a conflict (two fields named "X" at the same level) and we
|
|
||||||
// return no field.
|
|
||||||
if len(fields) > 1 {
|
|
||||||
return field{}, false
|
|
||||||
}
|
|
||||||
return fields[0], true
|
|
||||||
}
|
|
||||||
|
|
||||||
var fieldCache struct {
|
|
||||||
sync.RWMutex
|
|
||||||
m map[reflect.Type][]field
|
|
||||||
}
|
|
||||||
|
|
||||||
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
|
|
||||||
func cachedTypeFields(t reflect.Type) []field {
|
|
||||||
fieldCache.RLock()
|
|
||||||
f := fieldCache.m[t]
|
|
||||||
fieldCache.RUnlock()
|
|
||||||
if f != nil {
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
|
|
||||||
// Compute fields without lock.
|
|
||||||
// Might duplicate effort but won't hold other computations back.
|
|
||||||
f = typeFields(t)
|
|
||||||
if f == nil {
|
|
||||||
f = []field{}
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldCache.Lock()
|
|
||||||
if fieldCache.m == nil {
|
|
||||||
fieldCache.m = map[reflect.Type][]field{}
|
|
||||||
}
|
|
||||||
fieldCache.m[t] = f
|
|
||||||
fieldCache.Unlock()
|
|
||||||
return f
|
|
||||||
}
|
|
66
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
66
vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go
generated
vendored
|
@ -3473,6 +3473,30 @@ var awsPartition = partition{
|
||||||
"eu-west-1": endpoint{},
|
"eu-west-1": endpoint{},
|
||||||
"eu-west-2": endpoint{},
|
"eu-west-2": endpoint{},
|
||||||
"eu-west-3": endpoint{},
|
"eu-west-3": endpoint{},
|
||||||
|
"fips-us-east-1": endpoint{
|
||||||
|
Hostname: "logs-fips.us-east-1.amazonaws.com",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "us-east-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"fips-us-east-2": endpoint{
|
||||||
|
Hostname: "logs-fips.us-east-2.amazonaws.com",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "us-east-2",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"fips-us-west-1": endpoint{
|
||||||
|
Hostname: "logs-fips.us-west-1.amazonaws.com",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "us-west-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"fips-us-west-2": endpoint{
|
||||||
|
Hostname: "logs-fips.us-west-2.amazonaws.com",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "us-west-2",
|
||||||
|
},
|
||||||
|
},
|
||||||
"me-south-1": endpoint{},
|
"me-south-1": endpoint{},
|
||||||
"sa-east-1": endpoint{},
|
"sa-east-1": endpoint{},
|
||||||
"us-east-1": endpoint{},
|
"us-east-1": endpoint{},
|
||||||
|
@ -6649,6 +6673,25 @@ var awscnPartition = partition{
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"organizations": service{
|
||||||
|
PartitionEndpoint: "aws-cn-global",
|
||||||
|
IsRegionalized: boxedFalse,
|
||||||
|
|
||||||
|
Endpoints: endpoints{
|
||||||
|
"aws-cn-global": endpoint{
|
||||||
|
Hostname: "organizations.cn-northwest-1.amazonaws.com.cn",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "cn-northwest-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"fips-aws-cn-global": endpoint{
|
||||||
|
Hostname: "organizations.cn-northwest-1.amazonaws.com.cn",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "cn-northwest-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
|
},
|
||||||
"polly": service{
|
"polly": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
|
@ -7404,7 +7447,7 @@ var awsusgovPartition = partition{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
"fips": endpoint{
|
"fips": endpoint{
|
||||||
Hostname: "elasticache-fips.us-gov-west-1.amazonaws.com",
|
Hostname: "elasticache.us-gov-west-1.amazonaws.com",
|
||||||
CredentialScope: credentialScope{
|
CredentialScope: credentialScope{
|
||||||
Region: "us-gov-west-1",
|
Region: "us-gov-west-1",
|
||||||
},
|
},
|
||||||
|
@ -7704,6 +7747,13 @@ var awsusgovPartition = partition{
|
||||||
"us-gov-west-1": endpoint{},
|
"us-gov-west-1": endpoint{},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
"kinesisanalytics": service{
|
||||||
|
|
||||||
|
Endpoints: endpoints{
|
||||||
|
"us-gov-east-1": endpoint{},
|
||||||
|
"us-gov-west-1": endpoint{},
|
||||||
|
},
|
||||||
|
},
|
||||||
"kms": service{
|
"kms": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
|
@ -7758,8 +7808,18 @@ var awsusgovPartition = partition{
|
||||||
"logs": service{
|
"logs": service{
|
||||||
|
|
||||||
Endpoints: endpoints{
|
Endpoints: endpoints{
|
||||||
"us-gov-east-1": endpoint{},
|
"us-gov-east-1": endpoint{
|
||||||
"us-gov-west-1": endpoint{},
|
Hostname: "logs.us-gov-east-1.amazonaws.com",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "us-gov-east-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
|
"us-gov-west-1": endpoint{
|
||||||
|
Hostname: "logs.us-gov-west-1.amazonaws.com",
|
||||||
|
CredentialScope: credentialScope{
|
||||||
|
Region: "us-gov-west-1",
|
||||||
|
},
|
||||||
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
"mediaconvert": service{
|
"mediaconvert": service{
|
||||||
|
|
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/aws/version.go
generated
vendored
|
@ -5,4 +5,4 @@ package aws
|
||||||
const SDKName = "aws-sdk-go"
|
const SDKName = "aws-sdk-go"
|
||||||
|
|
||||||
// SDKVersion is the version of this SDK
|
// SDKVersion is the version of this SDK
|
||||||
const SDKVersion = "1.32.5"
|
const SDKVersion = "1.32.10"
|
||||||
|
|
16
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
16
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
|
@ -1950,6 +1950,20 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) {
|
||||||
return int(n), nil
|
return int(n), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func isGroupMember(gid int) bool {
|
||||||
|
groups, err := Getgroups()
|
||||||
|
if err != nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, g := range groups {
|
||||||
|
if g == gid {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
//sys faccessat(dirfd int, path string, mode uint32) (err error)
|
//sys faccessat(dirfd int, path string, mode uint32) (err error)
|
||||||
|
|
||||||
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
|
func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
|
||||||
|
@ -2007,7 +2021,7 @@ func Faccessat(dirfd int, path string, mode uint32, flags int) (err error) {
|
||||||
gid = Getgid()
|
gid = Getgid()
|
||||||
}
|
}
|
||||||
|
|
||||||
if uint32(gid) == st.Gid {
|
if uint32(gid) == st.Gid || isGroupMember(gid) {
|
||||||
fmode = (st.Mode >> 3) & 7
|
fmode = (st.Mode >> 3) & 7
|
||||||
} else {
|
} else {
|
||||||
fmode = st.Mode & 7
|
fmode = st.Mode & 7
|
||||||
|
|
12
vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
generated
vendored
12
vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go
generated
vendored
|
@ -125,9 +125,9 @@ type Statfs_t struct {
|
||||||
Owner uint32
|
Owner uint32
|
||||||
Fsid Fsid
|
Fsid Fsid
|
||||||
Charspare [80]int8
|
Charspare [80]int8
|
||||||
Fstypename [16]int8
|
Fstypename [16]byte
|
||||||
Mntfromname [1024]int8
|
Mntfromname [1024]byte
|
||||||
Mntonname [1024]int8
|
Mntonname [1024]byte
|
||||||
}
|
}
|
||||||
|
|
||||||
type statfs_freebsd11_t struct {
|
type statfs_freebsd11_t struct {
|
||||||
|
@ -150,9 +150,9 @@ type statfs_freebsd11_t struct {
|
||||||
Owner uint32
|
Owner uint32
|
||||||
Fsid Fsid
|
Fsid Fsid
|
||||||
Charspare [80]int8
|
Charspare [80]int8
|
||||||
Fstypename [16]int8
|
Fstypename [16]byte
|
||||||
Mntfromname [88]int8
|
Mntfromname [88]byte
|
||||||
Mntonname [88]int8
|
Mntonname [88]byte
|
||||||
}
|
}
|
||||||
|
|
||||||
type Flock_t struct {
|
type Flock_t struct {
|
||||||
|
|
11
vendor/golang.org/x/tools/cmd/goimports/goimports.go
generated
vendored
11
vendor/golang.org/x/tools/cmd/goimports/goimports.go
generated
vendored
|
@ -10,7 +10,6 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"flag"
|
"flag"
|
||||||
"fmt"
|
"fmt"
|
||||||
"go/build"
|
|
||||||
"go/scanner"
|
"go/scanner"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
|
@ -43,15 +42,7 @@ var (
|
||||||
TabIndent: true,
|
TabIndent: true,
|
||||||
Comments: true,
|
Comments: true,
|
||||||
Fragment: true,
|
Fragment: true,
|
||||||
// This environment, and its caches, will be reused for the whole run.
|
Env: &imports.ProcessEnv{},
|
||||||
Env: &imports.ProcessEnv{
|
|
||||||
GOPATH: build.Default.GOPATH,
|
|
||||||
GOROOT: build.Default.GOROOT,
|
|
||||||
GOFLAGS: os.Getenv("GOFLAGS"),
|
|
||||||
GO111MODULE: os.Getenv("GO111MODULE"),
|
|
||||||
GOPROXY: os.Getenv("GOPROXY"),
|
|
||||||
GOSUMDB: os.Getenv("GOSUMDB"),
|
|
||||||
},
|
|
||||||
}
|
}
|
||||||
exitCode = 0
|
exitCode = 0
|
||||||
)
|
)
|
||||||
|
|
237
vendor/golang.org/x/tools/go/analysis/analysis.go
generated
vendored
237
vendor/golang.org/x/tools/go/analysis/analysis.go
generated
vendored
|
@ -1,237 +0,0 @@
|
||||||
package analysis
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/token"
|
|
||||||
"go/types"
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"golang.org/x/tools/internal/analysisinternal"
|
|
||||||
)
|
|
||||||
|
|
||||||
// An Analyzer describes an analysis function and its options.
|
|
||||||
type Analyzer struct {
|
|
||||||
// The Name of the analyzer must be a valid Go identifier
|
|
||||||
// as it may appear in command-line flags, URLs, and so on.
|
|
||||||
Name string
|
|
||||||
|
|
||||||
// Doc is the documentation for the analyzer.
|
|
||||||
// The part before the first "\n\n" is the title
|
|
||||||
// (no capital or period, max ~60 letters).
|
|
||||||
Doc string
|
|
||||||
|
|
||||||
// Flags defines any flags accepted by the analyzer.
|
|
||||||
// The manner in which these flags are exposed to the user
|
|
||||||
// depends on the driver which runs the analyzer.
|
|
||||||
Flags flag.FlagSet
|
|
||||||
|
|
||||||
// Run applies the analyzer to a package.
|
|
||||||
// It returns an error if the analyzer failed.
|
|
||||||
//
|
|
||||||
// On success, the Run function may return a result
|
|
||||||
// computed by the Analyzer; its type must match ResultType.
|
|
||||||
// The driver makes this result available as an input to
|
|
||||||
// another Analyzer that depends directly on this one (see
|
|
||||||
// Requires) when it analyzes the same package.
|
|
||||||
//
|
|
||||||
// To pass analysis results between packages (and thus
|
|
||||||
// potentially between address spaces), use Facts, which are
|
|
||||||
// serializable.
|
|
||||||
Run func(*Pass) (interface{}, error)
|
|
||||||
|
|
||||||
// RunDespiteErrors allows the driver to invoke
|
|
||||||
// the Run method of this analyzer even on a
|
|
||||||
// package that contains parse or type errors.
|
|
||||||
RunDespiteErrors bool
|
|
||||||
|
|
||||||
// Requires is a set of analyzers that must run successfully
|
|
||||||
// before this one on a given package. This analyzer may inspect
|
|
||||||
// the outputs produced by each analyzer in Requires.
|
|
||||||
// The graph over analyzers implied by Requires edges must be acyclic.
|
|
||||||
//
|
|
||||||
// Requires establishes a "horizontal" dependency between
|
|
||||||
// analysis passes (different analyzers, same package).
|
|
||||||
Requires []*Analyzer
|
|
||||||
|
|
||||||
// ResultType is the type of the optional result of the Run function.
|
|
||||||
ResultType reflect.Type
|
|
||||||
|
|
||||||
// FactTypes indicates that this analyzer imports and exports
|
|
||||||
// Facts of the specified concrete types.
|
|
||||||
// An analyzer that uses facts may assume that its import
|
|
||||||
// dependencies have been similarly analyzed before it runs.
|
|
||||||
// Facts must be pointers.
|
|
||||||
//
|
|
||||||
// FactTypes establishes a "vertical" dependency between
|
|
||||||
// analysis passes (same analyzer, different packages).
|
|
||||||
FactTypes []Fact
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *Analyzer) String() string { return a.Name }
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// Set the analysisinternal functions to be able to pass type errors
|
|
||||||
// to the Pass type without modifying the go/analysis API.
|
|
||||||
analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) {
|
|
||||||
p.(*Pass).typeErrors = errors
|
|
||||||
}
|
|
||||||
analysisinternal.GetTypeErrors = func(p interface{}) []types.Error {
|
|
||||||
return p.(*Pass).typeErrors
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Pass provides information to the Run function that
|
|
||||||
// applies a specific analyzer to a single Go package.
|
|
||||||
//
|
|
||||||
// It forms the interface between the analysis logic and the driver
|
|
||||||
// program, and has both input and an output components.
|
|
||||||
//
|
|
||||||
// As in a compiler, one pass may depend on the result computed by another.
|
|
||||||
//
|
|
||||||
// The Run function should not call any of the Pass functions concurrently.
|
|
||||||
type Pass struct {
|
|
||||||
Analyzer *Analyzer // the identity of the current analyzer
|
|
||||||
|
|
||||||
// syntax and type information
|
|
||||||
Fset *token.FileSet // file position information
|
|
||||||
Files []*ast.File // the abstract syntax tree of each file
|
|
||||||
OtherFiles []string // names of non-Go files of this package
|
|
||||||
Pkg *types.Package // type information about the package
|
|
||||||
TypesInfo *types.Info // type information about the syntax trees
|
|
||||||
TypesSizes types.Sizes // function for computing sizes of types
|
|
||||||
|
|
||||||
// Report reports a Diagnostic, a finding about a specific location
|
|
||||||
// in the analyzed source code such as a potential mistake.
|
|
||||||
// It may be called by the Run function.
|
|
||||||
Report func(Diagnostic)
|
|
||||||
|
|
||||||
// ResultOf provides the inputs to this analysis pass, which are
|
|
||||||
// the corresponding results of its prerequisite analyzers.
|
|
||||||
// The map keys are the elements of Analysis.Required,
|
|
||||||
// and the type of each corresponding value is the required
|
|
||||||
// analysis's ResultType.
|
|
||||||
ResultOf map[*Analyzer]interface{}
|
|
||||||
|
|
||||||
// -- facts --
|
|
||||||
|
|
||||||
// ImportObjectFact retrieves a fact associated with obj.
|
|
||||||
// Given a value ptr of type *T, where *T satisfies Fact,
|
|
||||||
// ImportObjectFact copies the value to *ptr.
|
|
||||||
//
|
|
||||||
// ImportObjectFact panics if called after the pass is complete.
|
|
||||||
// ImportObjectFact is not concurrency-safe.
|
|
||||||
ImportObjectFact func(obj types.Object, fact Fact) bool
|
|
||||||
|
|
||||||
// ImportPackageFact retrieves a fact associated with package pkg,
|
|
||||||
// which must be this package or one of its dependencies.
|
|
||||||
// See comments for ImportObjectFact.
|
|
||||||
ImportPackageFact func(pkg *types.Package, fact Fact) bool
|
|
||||||
|
|
||||||
// ExportObjectFact associates a fact of type *T with the obj,
|
|
||||||
// replacing any previous fact of that type.
|
|
||||||
//
|
|
||||||
// ExportObjectFact panics if it is called after the pass is
|
|
||||||
// complete, or if obj does not belong to the package being analyzed.
|
|
||||||
// ExportObjectFact is not concurrency-safe.
|
|
||||||
ExportObjectFact func(obj types.Object, fact Fact)
|
|
||||||
|
|
||||||
// ExportPackageFact associates a fact with the current package.
|
|
||||||
// See comments for ExportObjectFact.
|
|
||||||
ExportPackageFact func(fact Fact)
|
|
||||||
|
|
||||||
// AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes
|
|
||||||
// in unspecified order.
|
|
||||||
// WARNING: This is an experimental API and may change in the future.
|
|
||||||
AllPackageFacts func() []PackageFact
|
|
||||||
|
|
||||||
// AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes
|
|
||||||
// in unspecified order.
|
|
||||||
// WARNING: This is an experimental API and may change in the future.
|
|
||||||
AllObjectFacts func() []ObjectFact
|
|
||||||
|
|
||||||
// typeErrors contains types.Errors that are associated with the pkg.
|
|
||||||
typeErrors []types.Error
|
|
||||||
|
|
||||||
/* Further fields may be added in future. */
|
|
||||||
// For example, suggested or applied refactorings.
|
|
||||||
}
|
|
||||||
|
|
||||||
// PackageFact is a package together with an associated fact.
|
|
||||||
// WARNING: This is an experimental API and may change in the future.
|
|
||||||
type PackageFact struct {
|
|
||||||
Package *types.Package
|
|
||||||
Fact Fact
|
|
||||||
}
|
|
||||||
|
|
||||||
// ObjectFact is an object together with an associated fact.
|
|
||||||
// WARNING: This is an experimental API and may change in the future.
|
|
||||||
type ObjectFact struct {
|
|
||||||
Object types.Object
|
|
||||||
Fact Fact
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reportf is a helper function that reports a Diagnostic using the
|
|
||||||
// specified position and formatted error message.
|
|
||||||
func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) {
|
|
||||||
msg := fmt.Sprintf(format, args...)
|
|
||||||
pass.Report(Diagnostic{Pos: pos, Message: msg})
|
|
||||||
}
|
|
||||||
|
|
||||||
// The Range interface provides a range. It's equivalent to and satisfied by
|
|
||||||
// ast.Node.
|
|
||||||
type Range interface {
|
|
||||||
Pos() token.Pos // position of first character belonging to the node
|
|
||||||
End() token.Pos // position of first character immediately after the node
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReportRangef is a helper function that reports a Diagnostic using the
|
|
||||||
// range provided. ast.Node values can be passed in as the range because
|
|
||||||
// they satisfy the Range interface.
|
|
||||||
func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) {
|
|
||||||
msg := fmt.Sprintf(format, args...)
|
|
||||||
pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg})
|
|
||||||
}
|
|
||||||
|
|
||||||
func (pass *Pass) String() string {
|
|
||||||
return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path())
|
|
||||||
}
|
|
||||||
|
|
||||||
// A Fact is an intermediate fact produced during analysis.
|
|
||||||
//
|
|
||||||
// Each fact is associated with a named declaration (a types.Object) or
|
|
||||||
// with a package as a whole. A single object or package may have
|
|
||||||
// multiple associated facts, but only one of any particular fact type.
|
|
||||||
//
|
|
||||||
// A Fact represents a predicate such as "never returns", but does not
|
|
||||||
// represent the subject of the predicate such as "function F" or "package P".
|
|
||||||
//
|
|
||||||
// Facts may be produced in one analysis pass and consumed by another
|
|
||||||
// analysis pass even if these are in different address spaces.
|
|
||||||
// If package P imports Q, all facts about Q produced during
|
|
||||||
// analysis of that package will be available during later analysis of P.
|
|
||||||
// Facts are analogous to type export data in a build system:
|
|
||||||
// just as export data enables separate compilation of several passes,
|
|
||||||
// facts enable "separate analysis".
|
|
||||||
//
|
|
||||||
// Each pass (a, p) starts with the set of facts produced by the
|
|
||||||
// same analyzer a applied to the packages directly imported by p.
|
|
||||||
// The analysis may add facts to the set, and they may be exported in turn.
|
|
||||||
// An analysis's Run function may retrieve facts by calling
|
|
||||||
// Pass.Import{Object,Package}Fact and update them using
|
|
||||||
// Pass.Export{Object,Package}Fact.
|
|
||||||
//
|
|
||||||
// A fact is logically private to its Analysis. To pass values
|
|
||||||
// between different analyzers, use the results mechanism;
|
|
||||||
// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf.
|
|
||||||
//
|
|
||||||
// A Fact type must be a pointer.
|
|
||||||
// Facts are encoded and decoded using encoding/gob.
|
|
||||||
// A Fact may implement the GobEncoder/GobDecoder interfaces
|
|
||||||
// to customize its encoding. Fact encoding should not fail.
|
|
||||||
//
|
|
||||||
// A Fact should not be modified once exported.
|
|
||||||
type Fact interface {
|
|
||||||
AFact() // dummy method to avoid type errors
|
|
||||||
}
|
|
61
vendor/golang.org/x/tools/go/analysis/diagnostic.go
generated
vendored
61
vendor/golang.org/x/tools/go/analysis/diagnostic.go
generated
vendored
|
@ -1,61 +0,0 @@
|
||||||
package analysis
|
|
||||||
|
|
||||||
import "go/token"
|
|
||||||
|
|
||||||
// A Diagnostic is a message associated with a source location or range.
|
|
||||||
//
|
|
||||||
// An Analyzer may return a variety of diagnostics; the optional Category,
|
|
||||||
// which should be a constant, may be used to classify them.
|
|
||||||
// It is primarily intended to make it easy to look up documentation.
|
|
||||||
//
|
|
||||||
// If End is provided, the diagnostic is specified to apply to the range between
|
|
||||||
// Pos and End.
|
|
||||||
type Diagnostic struct {
|
|
||||||
Pos token.Pos
|
|
||||||
End token.Pos // optional
|
|
||||||
Category string // optional
|
|
||||||
Message string
|
|
||||||
|
|
||||||
// SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform
|
|
||||||
// edits to a file that address the diagnostic.
|
|
||||||
// TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic?
|
|
||||||
// Diagnostics should not contain SuggestedFixes that overlap.
|
|
||||||
// Experimental: This API is experimental and may change in the future.
|
|
||||||
SuggestedFixes []SuggestedFix // optional
|
|
||||||
|
|
||||||
// Experimental: This API is experimental and may change in the future.
|
|
||||||
Related []RelatedInformation // optional
|
|
||||||
}
|
|
||||||
|
|
||||||
// RelatedInformation contains information related to a diagnostic.
|
|
||||||
// For example, a diagnostic that flags duplicated declarations of a
|
|
||||||
// variable may include one RelatedInformation per existing
|
|
||||||
// declaration.
|
|
||||||
type RelatedInformation struct {
|
|
||||||
Pos token.Pos
|
|
||||||
End token.Pos
|
|
||||||
Message string
|
|
||||||
}
|
|
||||||
|
|
||||||
// A SuggestedFix is a code change associated with a Diagnostic that a user can choose
|
|
||||||
// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged
|
|
||||||
// by the diagnostic.
|
|
||||||
// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix
|
|
||||||
// should not contain edits for other packages.
|
|
||||||
// Experimental: This API is experimental and may change in the future.
|
|
||||||
type SuggestedFix struct {
|
|
||||||
// A description for this suggested fix to be shown to a user deciding
|
|
||||||
// whether to accept it.
|
|
||||||
Message string
|
|
||||||
TextEdits []TextEdit
|
|
||||||
}
|
|
||||||
|
|
||||||
// A TextEdit represents the replacement of the code between Pos and End with the new text.
|
|
||||||
// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos.
|
|
||||||
// Experimental: This API is experimental and may change in the future.
|
|
||||||
type TextEdit struct {
|
|
||||||
// For a pure insertion, End can either be set to Pos or token.NoPos.
|
|
||||||
Pos token.Pos
|
|
||||||
End token.Pos
|
|
||||||
NewText []byte
|
|
||||||
}
|
|
310
vendor/golang.org/x/tools/go/analysis/doc.go
generated
vendored
310
vendor/golang.org/x/tools/go/analysis/doc.go
generated
vendored
|
@ -1,310 +0,0 @@
|
||||||
/*
|
|
||||||
|
|
||||||
Package analysis defines the interface between a modular static
|
|
||||||
analysis and an analysis driver program.
|
|
||||||
|
|
||||||
|
|
||||||
Background
|
|
||||||
|
|
||||||
A static analysis is a function that inspects a package of Go code and
|
|
||||||
reports a set of diagnostics (typically mistakes in the code), and
|
|
||||||
perhaps produces other results as well, such as suggested refactorings
|
|
||||||
or other facts. An analysis that reports mistakes is informally called a
|
|
||||||
"checker". For example, the printf checker reports mistakes in
|
|
||||||
fmt.Printf format strings.
|
|
||||||
|
|
||||||
A "modular" analysis is one that inspects one package at a time but can
|
|
||||||
save information from a lower-level package and use it when inspecting a
|
|
||||||
higher-level package, analogous to separate compilation in a toolchain.
|
|
||||||
The printf checker is modular: when it discovers that a function such as
|
|
||||||
log.Fatalf delegates to fmt.Printf, it records this fact, and checks
|
|
||||||
calls to that function too, including calls made from another package.
|
|
||||||
|
|
||||||
By implementing a common interface, checkers from a variety of sources
|
|
||||||
can be easily selected, incorporated, and reused in a wide range of
|
|
||||||
driver programs including command-line tools (such as vet), text editors and
|
|
||||||
IDEs, build and test systems (such as go build, Bazel, or Buck), test
|
|
||||||
frameworks, code review tools, code-base indexers (such as SourceGraph),
|
|
||||||
documentation viewers (such as godoc), batch pipelines for large code
|
|
||||||
bases, and so on.
|
|
||||||
|
|
||||||
|
|
||||||
Analyzer
|
|
||||||
|
|
||||||
The primary type in the API is Analyzer. An Analyzer statically
|
|
||||||
describes an analysis function: its name, documentation, flags,
|
|
||||||
relationship to other analyzers, and of course, its logic.
|
|
||||||
|
|
||||||
To define an analysis, a user declares a (logically constant) variable
|
|
||||||
of type Analyzer. Here is a typical example from one of the analyzers in
|
|
||||||
the go/analysis/passes/ subdirectory:
|
|
||||||
|
|
||||||
package unusedresult
|
|
||||||
|
|
||||||
var Analyzer = &analysis.Analyzer{
|
|
||||||
Name: "unusedresult",
|
|
||||||
Doc: "check for unused results of calls to some functions",
|
|
||||||
Run: run,
|
|
||||||
...
|
|
||||||
}
|
|
||||||
|
|
||||||
func run(pass *analysis.Pass) (interface{}, error) {
|
|
||||||
...
|
|
||||||
}
|
|
||||||
|
|
||||||
An analysis driver is a program such as vet that runs a set of
|
|
||||||
analyses and prints the diagnostics that they report.
|
|
||||||
The driver program must import the list of Analyzers it needs.
|
|
||||||
Typically each Analyzer resides in a separate package.
|
|
||||||
To add a new Analyzer to an existing driver, add another item to the list:
|
|
||||||
|
|
||||||
import ( "unusedresult"; "nilness"; "printf" )
|
|
||||||
|
|
||||||
var analyses = []*analysis.Analyzer{
|
|
||||||
unusedresult.Analyzer,
|
|
||||||
nilness.Analyzer,
|
|
||||||
printf.Analyzer,
|
|
||||||
}
|
|
||||||
|
|
||||||
A driver may use the name, flags, and documentation to provide on-line
|
|
||||||
help that describes the analyses it performs.
|
|
||||||
The doc comment contains a brief one-line summary,
|
|
||||||
optionally followed by paragraphs of explanation.
|
|
||||||
|
|
||||||
The Analyzer type has more fields besides those shown above:
|
|
||||||
|
|
||||||
type Analyzer struct {
|
|
||||||
Name string
|
|
||||||
Doc string
|
|
||||||
Flags flag.FlagSet
|
|
||||||
Run func(*Pass) (interface{}, error)
|
|
||||||
RunDespiteErrors bool
|
|
||||||
ResultType reflect.Type
|
|
||||||
Requires []*Analyzer
|
|
||||||
FactTypes []Fact
|
|
||||||
}
|
|
||||||
|
|
||||||
The Flags field declares a set of named (global) flag variables that
|
|
||||||
control analysis behavior. Unlike vet, analysis flags are not declared
|
|
||||||
directly in the command line FlagSet; it is up to the driver to set the
|
|
||||||
flag variables. A driver for a single analysis, a, might expose its flag
|
|
||||||
f directly on the command line as -f, whereas a driver for multiple
|
|
||||||
analyses might prefix the flag name by the analysis name (-a.f) to avoid
|
|
||||||
ambiguity. An IDE might expose the flags through a graphical interface,
|
|
||||||
and a batch pipeline might configure them from a config file.
|
|
||||||
See the "findcall" analyzer for an example of flags in action.
|
|
||||||
|
|
||||||
The RunDespiteErrors flag indicates whether the analysis is equipped to
|
|
||||||
handle ill-typed code. If not, the driver will skip the analysis if
|
|
||||||
there were parse or type errors.
|
|
||||||
The optional ResultType field specifies the type of the result value
|
|
||||||
computed by this analysis and made available to other analyses.
|
|
||||||
The Requires field specifies a list of analyses upon which
|
|
||||||
this one depends and whose results it may access, and it constrains the
|
|
||||||
order in which a driver may run analyses.
|
|
||||||
The FactTypes field is discussed in the section on Modularity.
|
|
||||||
The analysis package provides a Validate function to perform basic
|
|
||||||
sanity checks on an Analyzer, such as that its Requires graph is
|
|
||||||
acyclic, its fact and result types are unique, and so on.
|
|
||||||
|
|
||||||
Finally, the Run field contains a function to be called by the driver to
|
|
||||||
execute the analysis on a single package. The driver passes it an
|
|
||||||
instance of the Pass type.
|
|
||||||
|
|
||||||
|
|
||||||
Pass
|
|
||||||
|
|
||||||
A Pass describes a single unit of work: the application of a particular
|
|
||||||
Analyzer to a particular package of Go code.
|
|
||||||
The Pass provides information to the Analyzer's Run function about the
|
|
||||||
package being analyzed, and provides operations to the Run function for
|
|
||||||
reporting diagnostics and other information back to the driver.
|
|
||||||
|
|
||||||
type Pass struct {
|
|
||||||
Fset *token.FileSet
|
|
||||||
Files []*ast.File
|
|
||||||
OtherFiles []string
|
|
||||||
Pkg *types.Package
|
|
||||||
TypesInfo *types.Info
|
|
||||||
ResultOf map[*Analyzer]interface{}
|
|
||||||
Report func(Diagnostic)
|
|
||||||
...
|
|
||||||
}
|
|
||||||
|
|
||||||
The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees,
|
|
||||||
type information, and source positions for a single package of Go code.
|
|
||||||
|
|
||||||
The OtherFiles field provides the names, but not the contents, of non-Go
|
|
||||||
files such as assembly that are part of this package. See the "asmdecl"
|
|
||||||
or "buildtags" analyzers for examples of loading non-Go files and reporting
|
|
||||||
diagnostics against them.
|
|
||||||
|
|
||||||
The ResultOf field provides the results computed by the analyzers
|
|
||||||
required by this one, as expressed in its Analyzer.Requires field. The
|
|
||||||
driver runs the required analyzers first and makes their results
|
|
||||||
available in this map. Each Analyzer must return a value of the type
|
|
||||||
described in its Analyzer.ResultType field.
|
|
||||||
For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which
|
|
||||||
provides a control-flow graph for each function in the package (see
|
|
||||||
golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that
|
|
||||||
enables other Analyzers to traverse the syntax trees of the package more
|
|
||||||
efficiently; and the "buildssa" analyzer constructs an SSA-form
|
|
||||||
intermediate representation.
|
|
||||||
Each of these Analyzers extends the capabilities of later Analyzers
|
|
||||||
without adding a dependency to the core API, so an analysis tool pays
|
|
||||||
only for the extensions it needs.
|
|
||||||
|
|
||||||
The Report function emits a diagnostic, a message associated with a
|
|
||||||
source position. For most analyses, diagnostics are their primary
|
|
||||||
result.
|
|
||||||
For convenience, Pass provides a helper method, Reportf, to report a new
|
|
||||||
diagnostic by formatting a string.
|
|
||||||
Diagnostic is defined as:
|
|
||||||
|
|
||||||
type Diagnostic struct {
|
|
||||||
Pos token.Pos
|
|
||||||
Category string // optional
|
|
||||||
Message string
|
|
||||||
}
|
|
||||||
|
|
||||||
The optional Category field is a short identifier that classifies the
|
|
||||||
kind of message when an analysis produces several kinds of diagnostic.
|
|
||||||
|
|
||||||
Many analyses want to associate diagnostics with a severity level.
|
|
||||||
Because Diagnostic does not have a severity level field, an Analyzer's
|
|
||||||
diagnostics effectively all have the same severity level. To separate which
|
|
||||||
diagnostics are high severity and which are low severity, expose multiple
|
|
||||||
Analyzers instead. Analyzers should also be separated when their
|
|
||||||
diagnostics belong in different groups, or could be tagged differently
|
|
||||||
before being shown to the end user. Analyzers should document their severity
|
|
||||||
level to help downstream tools surface diagnostics properly.
|
|
||||||
|
|
||||||
Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl
|
|
||||||
and buildtag, inspect the raw text of Go source files or even non-Go
|
|
||||||
files such as assembly. To report a diagnostic against a line of a
|
|
||||||
raw text file, use the following sequence:
|
|
||||||
|
|
||||||
content, err := ioutil.ReadFile(filename)
|
|
||||||
if err != nil { ... }
|
|
||||||
tf := fset.AddFile(filename, -1, len(content))
|
|
||||||
tf.SetLinesForContent(content)
|
|
||||||
...
|
|
||||||
pass.Reportf(tf.LineStart(line), "oops")
|
|
||||||
|
|
||||||
|
|
||||||
Modular analysis with Facts
|
|
||||||
|
|
||||||
To improve efficiency and scalability, large programs are routinely
|
|
||||||
built using separate compilation: units of the program are compiled
|
|
||||||
separately, and recompiled only when one of their dependencies changes;
|
|
||||||
independent modules may be compiled in parallel. The same technique may
|
|
||||||
be applied to static analyses, for the same benefits. Such analyses are
|
|
||||||
described as "modular".
|
|
||||||
|
|
||||||
A compiler’s type checker is an example of a modular static analysis.
|
|
||||||
Many other checkers we would like to apply to Go programs can be
|
|
||||||
understood as alternative or non-standard type systems. For example,
|
|
||||||
vet's printf checker infers whether a function has the "printf wrapper"
|
|
||||||
type, and it applies stricter checks to calls of such functions. In
|
|
||||||
addition, it records which functions are printf wrappers for use by
|
|
||||||
later analysis passes to identify other printf wrappers by induction.
|
|
||||||
A result such as “f is a printf wrapper” that is not interesting by
|
|
||||||
itself but serves as a stepping stone to an interesting result (such as
|
|
||||||
a diagnostic) is called a "fact".
|
|
||||||
|
|
||||||
The analysis API allows an analysis to define new types of facts, to
|
|
||||||
associate facts of these types with objects (named entities) declared
|
|
||||||
within the current package, or with the package as a whole, and to query
|
|
||||||
for an existing fact of a given type associated with an object or
|
|
||||||
package.
|
|
||||||
|
|
||||||
An Analyzer that uses facts must declare their types:
|
|
||||||
|
|
||||||
var Analyzer = &analysis.Analyzer{
|
|
||||||
Name: "printf",
|
|
||||||
FactTypes: []analysis.Fact{new(isWrapper)},
|
|
||||||
...
|
|
||||||
}
|
|
||||||
|
|
||||||
type isWrapper struct{} // => *types.Func f “is a printf wrapper”
|
|
||||||
|
|
||||||
The driver program ensures that facts for a pass’s dependencies are
|
|
||||||
generated before analyzing the package and is responsible for propagating
|
|
||||||
facts from one package to another, possibly across address spaces.
|
|
||||||
Consequently, Facts must be serializable. The API requires that drivers
|
|
||||||
use the gob encoding, an efficient, robust, self-describing binary
|
|
||||||
protocol. A fact type may implement the GobEncoder/GobDecoder interfaces
|
|
||||||
if the default encoding is unsuitable. Facts should be stateless.
|
|
||||||
|
|
||||||
The Pass type has functions to import and export facts,
|
|
||||||
associated either with an object or with a package:
|
|
||||||
|
|
||||||
type Pass struct {
|
|
||||||
...
|
|
||||||
ExportObjectFact func(types.Object, Fact)
|
|
||||||
ImportObjectFact func(types.Object, Fact) bool
|
|
||||||
|
|
||||||
ExportPackageFact func(fact Fact)
|
|
||||||
ImportPackageFact func(*types.Package, Fact) bool
|
|
||||||
}
|
|
||||||
|
|
||||||
An Analyzer may only export facts associated with the current package or
|
|
||||||
its objects, though it may import facts from any package or object that
|
|
||||||
is an import dependency of the current package.
|
|
||||||
|
|
||||||
Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by
|
|
||||||
the pair (obj, TypeOf(fact)), and the ImportObjectFact function
|
|
||||||
retrieves the entry from this map and copies its value into the variable
|
|
||||||
pointed to by fact. This scheme assumes that the concrete type of fact
|
|
||||||
is a pointer; this assumption is checked by the Validate function.
|
|
||||||
See the "printf" analyzer for an example of object facts in action.
|
|
||||||
|
|
||||||
Some driver implementations (such as those based on Bazel and Blaze) do
|
|
||||||
not currently apply analyzers to packages of the standard library.
|
|
||||||
Therefore, for best results, analyzer authors should not rely on
|
|
||||||
analysis facts being available for standard packages.
|
|
||||||
For example, although the printf checker is capable of deducing during
|
|
||||||
analysis of the log package that log.Printf is a printf wrapper,
|
|
||||||
this fact is built in to the analyzer so that it correctly checks
|
|
||||||
calls to log.Printf even when run in a driver that does not apply
|
|
||||||
it to standard packages. We would like to remove this limitation in future.
|
|
||||||
|
|
||||||
|
|
||||||
Testing an Analyzer
|
|
||||||
|
|
||||||
The analysistest subpackage provides utilities for testing an Analyzer.
|
|
||||||
In a few lines of code, it is possible to run an analyzer on a package
|
|
||||||
of testdata files and check that it reported all the expected
|
|
||||||
diagnostics and facts (and no more). Expectations are expressed using
|
|
||||||
"// want ..." comments in the input code.
|
|
||||||
|
|
||||||
|
|
||||||
Standalone commands
|
|
||||||
|
|
||||||
Analyzers are provided in the form of packages that a driver program is
|
|
||||||
expected to import. The vet command imports a set of several analyzers,
|
|
||||||
but users may wish to define their own analysis commands that perform
|
|
||||||
additional checks. To simplify the task of creating an analysis command,
|
|
||||||
either for a single analyzer or for a whole suite, we provide the
|
|
||||||
singlechecker and multichecker subpackages.
|
|
||||||
|
|
||||||
The singlechecker package provides the main function for a command that
|
|
||||||
runs one analyzer. By convention, each analyzer such as
|
|
||||||
go/passes/findcall should be accompanied by a singlechecker-based
|
|
||||||
command such as go/analysis/passes/findcall/cmd/findcall, defined in its
|
|
||||||
entirety as:
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/tools/go/analysis/passes/findcall"
|
|
||||||
"golang.org/x/tools/go/analysis/singlechecker"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() { singlechecker.Main(findcall.Analyzer) }
|
|
||||||
|
|
||||||
A tool that provides multiple analyzers can use multichecker in a
|
|
||||||
similar way, giving it the list of Analyzers.
|
|
||||||
|
|
||||||
*/
|
|
||||||
package analysis
|
|
49
vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go
generated
vendored
49
vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go
generated
vendored
|
@ -1,49 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package inspect defines an Analyzer that provides an AST inspector
|
|
||||||
// (golang.org/x/tools/go/ast/inspect.Inspect) for the syntax trees of a
|
|
||||||
// package. It is only a building block for other analyzers.
|
|
||||||
//
|
|
||||||
// Example of use in another analysis:
|
|
||||||
//
|
|
||||||
// import (
|
|
||||||
// "golang.org/x/tools/go/analysis"
|
|
||||||
// "golang.org/x/tools/go/analysis/passes/inspect"
|
|
||||||
// "golang.org/x/tools/go/ast/inspector"
|
|
||||||
// )
|
|
||||||
//
|
|
||||||
// var Analyzer = &analysis.Analyzer{
|
|
||||||
// ...
|
|
||||||
// Requires: []*analysis.Analyzer{inspect.Analyzer},
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// func run(pass *analysis.Pass) (interface{}, error) {
|
|
||||||
// inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
|
|
||||||
// inspect.Preorder(nil, func(n ast.Node) {
|
|
||||||
// ...
|
|
||||||
// })
|
|
||||||
// return nil
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
package inspect
|
|
||||||
|
|
||||||
import (
|
|
||||||
"reflect"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/analysis"
|
|
||||||
"golang.org/x/tools/go/ast/inspector"
|
|
||||||
)
|
|
||||||
|
|
||||||
var Analyzer = &analysis.Analyzer{
|
|
||||||
Name: "inspect",
|
|
||||||
Doc: "optimize AST traversal for later passes",
|
|
||||||
Run: run,
|
|
||||||
RunDespiteErrors: true,
|
|
||||||
ResultType: reflect.TypeOf(new(inspector.Inspector)),
|
|
||||||
}
|
|
||||||
|
|
||||||
func run(pass *analysis.Pass) (interface{}, error) {
|
|
||||||
return inspector.New(pass.Files), nil
|
|
||||||
}
|
|
97
vendor/golang.org/x/tools/go/analysis/validate.go
generated
vendored
97
vendor/golang.org/x/tools/go/analysis/validate.go
generated
vendored
|
@ -1,97 +0,0 @@
|
||||||
package analysis
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Validate reports an error if any of the analyzers are misconfigured.
|
|
||||||
// Checks include:
|
|
||||||
// that the name is a valid identifier;
|
|
||||||
// that the Requires graph is acyclic;
|
|
||||||
// that analyzer fact types are unique;
|
|
||||||
// that each fact type is a pointer.
|
|
||||||
func Validate(analyzers []*Analyzer) error {
|
|
||||||
// Map each fact type to its sole generating analyzer.
|
|
||||||
factTypes := make(map[reflect.Type]*Analyzer)
|
|
||||||
|
|
||||||
// Traverse the Requires graph, depth first.
|
|
||||||
const (
|
|
||||||
white = iota
|
|
||||||
grey
|
|
||||||
black
|
|
||||||
finished
|
|
||||||
)
|
|
||||||
color := make(map[*Analyzer]uint8)
|
|
||||||
var visit func(a *Analyzer) error
|
|
||||||
visit = func(a *Analyzer) error {
|
|
||||||
if a == nil {
|
|
||||||
return fmt.Errorf("nil *Analyzer")
|
|
||||||
}
|
|
||||||
if color[a] == white {
|
|
||||||
color[a] = grey
|
|
||||||
|
|
||||||
// names
|
|
||||||
if !validIdent(a.Name) {
|
|
||||||
return fmt.Errorf("invalid analyzer name %q", a)
|
|
||||||
}
|
|
||||||
|
|
||||||
if a.Doc == "" {
|
|
||||||
return fmt.Errorf("analyzer %q is undocumented", a)
|
|
||||||
}
|
|
||||||
|
|
||||||
// fact types
|
|
||||||
for _, f := range a.FactTypes {
|
|
||||||
if f == nil {
|
|
||||||
return fmt.Errorf("analyzer %s has nil FactType", a)
|
|
||||||
}
|
|
||||||
t := reflect.TypeOf(f)
|
|
||||||
if prev := factTypes[t]; prev != nil {
|
|
||||||
return fmt.Errorf("fact type %s registered by two analyzers: %v, %v",
|
|
||||||
t, a, prev)
|
|
||||||
}
|
|
||||||
if t.Kind() != reflect.Ptr {
|
|
||||||
return fmt.Errorf("%s: fact type %s is not a pointer", a, t)
|
|
||||||
}
|
|
||||||
factTypes[t] = a
|
|
||||||
}
|
|
||||||
|
|
||||||
// recursion
|
|
||||||
for i, req := range a.Requires {
|
|
||||||
if err := visit(req); err != nil {
|
|
||||||
return fmt.Errorf("%s.Requires[%d]: %v", a.Name, i, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
color[a] = black
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
for _, a := range analyzers {
|
|
||||||
if err := visit(a); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reject duplicates among analyzers.
|
|
||||||
// Precondition: color[a] == black.
|
|
||||||
// Postcondition: color[a] == finished.
|
|
||||||
for _, a := range analyzers {
|
|
||||||
if color[a] == finished {
|
|
||||||
return fmt.Errorf("duplicate analyzer: %s", a.Name)
|
|
||||||
}
|
|
||||||
color[a] = finished
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func validIdent(name string) bool {
|
|
||||||
for i, r := range name {
|
|
||||||
if !(r == '_' || unicode.IsLetter(r) || i > 0 && unicode.IsDigit(r)) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return name != ""
|
|
||||||
}
|
|
186
vendor/golang.org/x/tools/go/ast/inspector/inspector.go
generated
vendored
186
vendor/golang.org/x/tools/go/ast/inspector/inspector.go
generated
vendored
|
@ -1,186 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package inspector provides helper functions for traversal over the
|
|
||||||
// syntax trees of a package, including node filtering by type, and
|
|
||||||
// materialization of the traversal stack.
|
|
||||||
//
|
|
||||||
// During construction, the inspector does a complete traversal and
|
|
||||||
// builds a list of push/pop events and their node type. Subsequent
|
|
||||||
// method calls that request a traversal scan this list, rather than walk
|
|
||||||
// the AST, and perform type filtering using efficient bit sets.
|
|
||||||
//
|
|
||||||
// Experiments suggest the inspector's traversals are about 2.5x faster
|
|
||||||
// than ast.Inspect, but it may take around 5 traversals for this
|
|
||||||
// benefit to amortize the inspector's construction cost.
|
|
||||||
// If efficiency is the primary concern, do not use Inspector for
|
|
||||||
// one-off traversals.
|
|
||||||
package inspector
|
|
||||||
|
|
||||||
// There are four orthogonal features in a traversal:
|
|
||||||
// 1 type filtering
|
|
||||||
// 2 pruning
|
|
||||||
// 3 postorder calls to f
|
|
||||||
// 4 stack
|
|
||||||
// Rather than offer all of them in the API,
|
|
||||||
// only a few combinations are exposed:
|
|
||||||
// - Preorder is the fastest and has fewest features,
|
|
||||||
// but is the most commonly needed traversal.
|
|
||||||
// - Nodes and WithStack both provide pruning and postorder calls,
|
|
||||||
// even though few clients need it, because supporting two versions
|
|
||||||
// is not justified.
|
|
||||||
// More combinations could be supported by expressing them as
|
|
||||||
// wrappers around a more generic traversal, but this was measured
|
|
||||||
// and found to degrade performance significantly (30%).
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/ast"
|
|
||||||
)
|
|
||||||
|
|
||||||
// An Inspector provides methods for inspecting
|
|
||||||
// (traversing) the syntax trees of a package.
|
|
||||||
type Inspector struct {
|
|
||||||
events []event
|
|
||||||
}
|
|
||||||
|
|
||||||
// New returns an Inspector for the specified syntax trees.
|
|
||||||
func New(files []*ast.File) *Inspector {
|
|
||||||
return &Inspector{traverse(files)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// An event represents a push or a pop
|
|
||||||
// of an ast.Node during a traversal.
|
|
||||||
type event struct {
|
|
||||||
node ast.Node
|
|
||||||
typ uint64 // typeOf(node)
|
|
||||||
index int // 1 + index of corresponding pop event, or 0 if this is a pop
|
|
||||||
}
|
|
||||||
|
|
||||||
// Preorder visits all the nodes of the files supplied to New in
|
|
||||||
// depth-first order. It calls f(n) for each node n before it visits
|
|
||||||
// n's children.
|
|
||||||
//
|
|
||||||
// The types argument, if non-empty, enables type-based filtering of
|
|
||||||
// events. The function f if is called only for nodes whose type
|
|
||||||
// matches an element of the types slice.
|
|
||||||
func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
|
|
||||||
// Because it avoids postorder calls to f, and the pruning
|
|
||||||
// check, Preorder is almost twice as fast as Nodes. The two
|
|
||||||
// features seem to contribute similar slowdowns (~1.4x each).
|
|
||||||
|
|
||||||
mask := maskOf(types)
|
|
||||||
for i := 0; i < len(in.events); {
|
|
||||||
ev := in.events[i]
|
|
||||||
if ev.typ&mask != 0 {
|
|
||||||
if ev.index > 0 {
|
|
||||||
f(ev.node)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Nodes visits the nodes of the files supplied to New in depth-first
|
|
||||||
// order. It calls f(n, true) for each node n before it visits n's
|
|
||||||
// children. If f returns true, Nodes invokes f recursively for each
|
|
||||||
// of the non-nil children of the node, followed by a call of
|
|
||||||
// f(n, false).
|
|
||||||
//
|
|
||||||
// The types argument, if non-empty, enables type-based filtering of
|
|
||||||
// events. The function f if is called only for nodes whose type
|
|
||||||
// matches an element of the types slice.
|
|
||||||
func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
|
|
||||||
mask := maskOf(types)
|
|
||||||
for i := 0; i < len(in.events); {
|
|
||||||
ev := in.events[i]
|
|
||||||
if ev.typ&mask != 0 {
|
|
||||||
if ev.index > 0 {
|
|
||||||
// push
|
|
||||||
if !f(ev.node, true) {
|
|
||||||
i = ev.index // jump to corresponding pop + 1
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// pop
|
|
||||||
f(ev.node, false)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithStack visits nodes in a similar manner to Nodes, but it
|
|
||||||
// supplies each call to f an additional argument, the current
|
|
||||||
// traversal stack. The stack's first element is the outermost node,
|
|
||||||
// an *ast.File; its last is the innermost, n.
|
|
||||||
func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
	mask := maskOf(types)
	var stack []ast.Node
	for i := 0; i < len(in.events); {
		ev := in.events[i]
		if ev.index > 0 {
			// push: the stack grows for every push event, even ones
			// filtered out by mask, so it always reflects the true path.
			stack = append(stack, ev.node)
			if ev.typ&mask != 0 {
				if !f(ev.node, true, stack) {
					i = ev.index // skip the subtree: resume just past the matching pop event
					stack = stack[:len(stack)-1] // undo the push (the pop event itself is skipped)
					continue
				}
			}
		} else {
			// pop
			if ev.typ&mask != 0 {
				f(ev.node, false, stack)
			}
			stack = stack[:len(stack)-1]
		}
		i++
	}
}
|
|
||||||
|
|
||||||
// traverse builds the table of events representing a traversal.
|
|
||||||
func traverse(files []*ast.File) []event {
	// Preallocate approximate number of events
	// based on source file extent.
	// This makes traverse faster by 4x (!).
	var extent int
	for _, f := range files {
		extent += int(f.End() - f.Pos())
	}
	// This estimate is based on the net/http package.
	capacity := extent * 33 / 100
	if capacity > 1e6 {
		capacity = 1e6 // impose some reasonable maximum
	}
	events := make([]event, 0, capacity)

	// stack holds the in-progress push events; ast.Inspect signals
	// the end of a node's subtree by calling back with n == nil.
	var stack []event
	for _, f := range files {
		ast.Inspect(f, func(n ast.Node) bool {
			if n != nil {
				// push
				ev := event{
					node:  n,
					typ:   typeOf(n),
					index: len(events), // push event temporarily holds own index
				}
				stack = append(stack, ev)
				events = append(events, ev)
			} else {
				// pop
				ev := stack[len(stack)-1]
				stack = stack[:len(stack)-1]

				events[ev.index].index = len(events) + 1 // make push refer to pop

				ev.index = 0 // turn ev into a pop event
				events = append(events, ev)
			}
			return true
		})
	}

	return events
}
|
|
216
vendor/golang.org/x/tools/go/ast/inspector/typeof.go
generated
vendored
216
vendor/golang.org/x/tools/go/ast/inspector/typeof.go
generated
vendored
|
@ -1,216 +0,0 @@
|
||||||
package inspector
|
|
||||||
|
|
||||||
// This file defines func typeOf(ast.Node) uint64.
|
|
||||||
//
|
|
||||||
// The initial map-based implementation was too slow;
|
|
||||||
// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196
|
|
||||||
|
|
||||||
import "go/ast"
|
|
||||||
|
|
||||||
const (
	// One constant per concrete ast.Node type, used as a bit position:
	// typeOf(n) returns 1 << nXxx, so a uint64 holds a set of node types.
	nArrayType = iota
	nAssignStmt
	nBadDecl
	nBadExpr
	nBadStmt
	nBasicLit
	nBinaryExpr
	nBlockStmt
	nBranchStmt
	nCallExpr
	nCaseClause
	nChanType
	nCommClause
	nComment
	nCommentGroup
	nCompositeLit
	nDeclStmt
	nDeferStmt
	nEllipsis
	nEmptyStmt
	nExprStmt
	nField
	nFieldList
	nFile
	nForStmt
	nFuncDecl
	nFuncLit
	nFuncType
	nGenDecl
	nGoStmt
	nIdent
	nIfStmt
	nImportSpec
	nIncDecStmt
	nIndexExpr
	nInterfaceType
	nKeyValueExpr
	nLabeledStmt
	nMapType
	nPackage
	nParenExpr
	nRangeStmt
	nReturnStmt
	nSelectStmt
	nSelectorExpr
	nSendStmt
	nSliceExpr
	nStarExpr
	nStructType
	nSwitchStmt
	nTypeAssertExpr
	nTypeSpec
	nTypeSwitchStmt
	nUnaryExpr
	nValueSpec
)
|
|
||||||
|
|
||||||
// typeOf returns a distinct single-bit value that represents the type of n.
|
|
||||||
//
|
|
||||||
// Various implementations were benchmarked with BenchmarkNewInspector:
|
|
||||||
// GOGC=off
|
|
||||||
// - type switch 4.9-5.5ms 2.1ms
|
|
||||||
// - binary search over a sorted list of types 5.5-5.9ms 2.5ms
|
|
||||||
// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms
|
|
||||||
// - linear scan, unordered list 6.4ms 2.7ms
|
|
||||||
// - hash table 6.5ms 3.1ms
|
|
||||||
// A perfect hash seemed like overkill.
|
|
||||||
//
|
|
||||||
// The compiler's switch statement is the clear winner
|
|
||||||
// as it produces a binary tree in code,
|
|
||||||
// with constant conditions and good branch prediction.
|
|
||||||
// (Sadly it is the most verbose in source code.)
|
|
||||||
// Binary search suffered from poor branch prediction.
|
|
||||||
//
|
|
||||||
func typeOf(n ast.Node) uint64 {
	// Fast path: nearly half of all nodes are identifiers.
	if _, ok := n.(*ast.Ident); ok {
		return 1 << nIdent
	}

	// These cases include all nodes encountered by ast.Inspect.
	switch n.(type) {
	case *ast.ArrayType:
		return 1 << nArrayType
	case *ast.AssignStmt:
		return 1 << nAssignStmt
	case *ast.BadDecl:
		return 1 << nBadDecl
	case *ast.BadExpr:
		return 1 << nBadExpr
	case *ast.BadStmt:
		return 1 << nBadStmt
	case *ast.BasicLit:
		return 1 << nBasicLit
	case *ast.BinaryExpr:
		return 1 << nBinaryExpr
	case *ast.BlockStmt:
		return 1 << nBlockStmt
	case *ast.BranchStmt:
		return 1 << nBranchStmt
	case *ast.CallExpr:
		return 1 << nCallExpr
	case *ast.CaseClause:
		return 1 << nCaseClause
	case *ast.ChanType:
		return 1 << nChanType
	case *ast.CommClause:
		return 1 << nCommClause
	case *ast.Comment:
		return 1 << nComment
	case *ast.CommentGroup:
		return 1 << nCommentGroup
	case *ast.CompositeLit:
		return 1 << nCompositeLit
	case *ast.DeclStmt:
		return 1 << nDeclStmt
	case *ast.DeferStmt:
		return 1 << nDeferStmt
	case *ast.Ellipsis:
		return 1 << nEllipsis
	case *ast.EmptyStmt:
		return 1 << nEmptyStmt
	case *ast.ExprStmt:
		return 1 << nExprStmt
	case *ast.Field:
		return 1 << nField
	case *ast.FieldList:
		return 1 << nFieldList
	case *ast.File:
		return 1 << nFile
	case *ast.ForStmt:
		return 1 << nForStmt
	case *ast.FuncDecl:
		return 1 << nFuncDecl
	case *ast.FuncLit:
		return 1 << nFuncLit
	case *ast.FuncType:
		return 1 << nFuncType
	case *ast.GenDecl:
		return 1 << nGenDecl
	case *ast.GoStmt:
		return 1 << nGoStmt
	case *ast.Ident:
		return 1 << nIdent
	case *ast.IfStmt:
		return 1 << nIfStmt
	case *ast.ImportSpec:
		return 1 << nImportSpec
	case *ast.IncDecStmt:
		return 1 << nIncDecStmt
	case *ast.IndexExpr:
		return 1 << nIndexExpr
	case *ast.InterfaceType:
		return 1 << nInterfaceType
	case *ast.KeyValueExpr:
		return 1 << nKeyValueExpr
	case *ast.LabeledStmt:
		return 1 << nLabeledStmt
	case *ast.MapType:
		return 1 << nMapType
	case *ast.Package:
		return 1 << nPackage
	case *ast.ParenExpr:
		return 1 << nParenExpr
	case *ast.RangeStmt:
		return 1 << nRangeStmt
	case *ast.ReturnStmt:
		return 1 << nReturnStmt
	case *ast.SelectStmt:
		return 1 << nSelectStmt
	case *ast.SelectorExpr:
		return 1 << nSelectorExpr
	case *ast.SendStmt:
		return 1 << nSendStmt
	case *ast.SliceExpr:
		return 1 << nSliceExpr
	case *ast.StarExpr:
		return 1 << nStarExpr
	case *ast.StructType:
		return 1 << nStructType
	case *ast.SwitchStmt:
		return 1 << nSwitchStmt
	case *ast.TypeAssertExpr:
		return 1 << nTypeAssertExpr
	case *ast.TypeSpec:
		return 1 << nTypeSpec
	case *ast.TypeSwitchStmt:
		return 1 << nTypeSwitchStmt
	case *ast.UnaryExpr:
		return 1 << nUnaryExpr
	case *ast.ValueSpec:
		return 1 << nValueSpec
	}
	// Not a node type that ast.Inspect ever yields.
	return 0
}
|
|
||||||
|
|
||||||
func maskOf(nodes []ast.Node) uint64 {
|
|
||||||
if nodes == nil {
|
|
||||||
return 1<<64 - 1 // match all node types
|
|
||||||
}
|
|
||||||
var mask uint64
|
|
||||||
for _, n := range nodes {
|
|
||||||
mask |= typeOf(n)
|
|
||||||
}
|
|
||||||
return mask
|
|
||||||
}
|
|
198
vendor/golang.org/x/tools/go/buildutil/allpackages.go
generated
vendored
198
vendor/golang.org/x/tools/go/buildutil/allpackages.go
generated
vendored
|
@ -1,198 +0,0 @@
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package buildutil provides utilities related to the go/build
|
|
||||||
// package in the standard library.
|
|
||||||
//
|
|
||||||
// All I/O is done via the build.Context file system interface, which must
|
|
||||||
// be concurrency-safe.
|
|
||||||
package buildutil // import "golang.org/x/tools/go/buildutil"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/build"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AllPackages returns the package path of each Go package in any source
|
|
||||||
// directory of the specified build context (e.g. $GOROOT or an element
|
|
||||||
// of $GOPATH). Errors are ignored. The results are sorted.
|
|
||||||
// All package paths are canonical, and thus may contain "/vendor/".
|
|
||||||
//
|
|
||||||
// The result may include import paths for directories that contain no
|
|
||||||
// *.go files, such as "archive" (in $GOROOT/src).
|
|
||||||
//
|
|
||||||
// All I/O is done via the build.Context file system interface,
|
|
||||||
// which must be concurrency-safe.
|
|
||||||
//
|
|
||||||
func AllPackages(ctxt *build.Context) []string {
|
|
||||||
var list []string
|
|
||||||
ForEachPackage(ctxt, func(pkg string, _ error) {
|
|
||||||
list = append(list, pkg)
|
|
||||||
})
|
|
||||||
sort.Strings(list)
|
|
||||||
return list
|
|
||||||
}
|
|
||||||
|
|
||||||
// ForEachPackage calls the found function with the package path of
|
|
||||||
// each Go package it finds in any source directory of the specified
|
|
||||||
// build context (e.g. $GOROOT or an element of $GOPATH).
|
|
||||||
// All package paths are canonical, and thus may contain "/vendor/".
|
|
||||||
//
|
|
||||||
// If the package directory exists but could not be read, the second
|
|
||||||
// argument to the found function provides the error.
|
|
||||||
//
|
|
||||||
// All I/O is done via the build.Context file system interface,
|
|
||||||
// which must be concurrency-safe.
|
|
||||||
//
|
|
||||||
func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
|
|
||||||
ch := make(chan item)
|
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
for _, root := range ctxt.SrcDirs() {
|
|
||||||
root := root
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
allPackages(ctxt, root, ch)
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
go func() {
|
|
||||||
wg.Wait()
|
|
||||||
close(ch)
|
|
||||||
}()
|
|
||||||
|
|
||||||
// All calls to found occur in the caller's goroutine.
|
|
||||||
for i := range ch {
|
|
||||||
found(i.importPath, i.err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// item is a single result of the concurrent package scan:
// a package import path plus any error from reading its directory.
type item struct {
	importPath string
	err        error // (optional)
}
|
|
||||||
|
|
||||||
// We use a process-wide counting semaphore to limit
|
|
||||||
// the number of parallel calls to ReadDir.
|
|
||||||
var ioLimit = make(chan bool, 20) // semaphore: send to acquire, receive to release (see allPackages)
|
|
||||||
|
|
||||||
// allPackages sends an item on ch for each package directory found
// beneath root, walking subdirectories in parallel goroutines.
// It returns only after every walker has finished.
func allPackages(ctxt *build.Context, root string, ch chan<- item) {
	root = filepath.Clean(root) + string(os.PathSeparator)

	var wg sync.WaitGroup

	var walkDir func(dir string)
	walkDir = func(dir string) {
		// Avoid .foo, _foo, and testdata directory trees.
		base := filepath.Base(dir)
		if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
			return
		}

		// Import path of dir, relative to the source root.
		pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))

		// Prune search if we encounter any of these import paths.
		switch pkg {
		case "builtin":
			return
		}

		// ioLimit bounds the number of concurrent ReadDir calls.
		ioLimit <- true
		files, err := ReadDir(ctxt, dir)
		<-ioLimit
		if pkg != "" || err != nil {
			ch <- item{pkg, err}
		}
		for _, fi := range files {
			fi := fi // capture per-iteration value for the goroutine below
			if fi.IsDir() {
				wg.Add(1)
				go func() {
					walkDir(filepath.Join(dir, fi.Name()))
					wg.Done()
				}()
			}
		}
	}

	walkDir(root)
	wg.Wait()
}
|
|
||||||
|
|
||||||
// ExpandPatterns returns the set of packages matched by patterns,
|
|
||||||
// which may have the following forms:
|
|
||||||
//
|
|
||||||
// golang.org/x/tools/cmd/guru # a single package
|
|
||||||
// golang.org/x/tools/... # all packages beneath dir
|
|
||||||
// ... # the entire workspace.
|
|
||||||
//
|
|
||||||
// Order is significant: a pattern preceded by '-' removes matching
|
|
||||||
// packages from the set. For example, these patterns match all encoding
|
|
||||||
// packages except encoding/xml:
|
|
||||||
//
|
|
||||||
// encoding/... -encoding/xml
|
|
||||||
//
|
|
||||||
// A trailing slash in a pattern is ignored. (Path components of Go
|
|
||||||
// package names are separated by slash, not the platform's path separator.)
|
|
||||||
//
|
|
||||||
func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
|
|
||||||
// TODO(adonovan): support other features of 'go list':
|
|
||||||
// - "std"/"cmd"/"all" meta-packages
|
|
||||||
// - "..." not at the end of a pattern
|
|
||||||
// - relative patterns using "./" or "../" prefix
|
|
||||||
|
|
||||||
pkgs := make(map[string]bool)
|
|
||||||
doPkg := func(pkg string, neg bool) {
|
|
||||||
if neg {
|
|
||||||
delete(pkgs, pkg)
|
|
||||||
} else {
|
|
||||||
pkgs[pkg] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Scan entire workspace if wildcards are present.
|
|
||||||
// TODO(adonovan): opt: scan only the necessary subtrees of the workspace.
|
|
||||||
var all []string
|
|
||||||
for _, arg := range patterns {
|
|
||||||
if strings.HasSuffix(arg, "...") {
|
|
||||||
all = AllPackages(ctxt)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, arg := range patterns {
|
|
||||||
if arg == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
neg := arg[0] == '-'
|
|
||||||
if neg {
|
|
||||||
arg = arg[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
if arg == "..." {
|
|
||||||
// ... matches all packages
|
|
||||||
for _, pkg := range all {
|
|
||||||
doPkg(pkg, neg)
|
|
||||||
}
|
|
||||||
} else if dir := strings.TrimSuffix(arg, "/..."); dir != arg {
|
|
||||||
// dir/... matches all packages beneath dir
|
|
||||||
for _, pkg := range all {
|
|
||||||
if strings.HasPrefix(pkg, dir) &&
|
|
||||||
(len(pkg) == len(dir) || pkg[len(dir)] == '/') {
|
|
||||||
doPkg(pkg, neg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// single package
|
|
||||||
doPkg(strings.TrimSuffix(arg, "/"), neg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return pkgs
|
|
||||||
}
|
|
109
vendor/golang.org/x/tools/go/buildutil/fakecontext.go
generated
vendored
109
vendor/golang.org/x/tools/go/buildutil/fakecontext.go
generated
vendored
|
@ -1,109 +0,0 @@
|
||||||
package buildutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/build"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FakeContext returns a build.Context for the fake file tree specified
|
|
||||||
// by pkgs, which maps package import paths to a mapping from file base
|
|
||||||
// names to contents.
|
|
||||||
//
|
|
||||||
// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
|
|
||||||
// the necessary file access methods to read from memory instead of the
|
|
||||||
// real file system.
|
|
||||||
//
|
|
||||||
// Unlike a real file tree, the fake one has only two levels---packages
|
|
||||||
// and files---so ReadDir("/go/src/") returns all packages under
|
|
||||||
// /go/src/ including, for instance, "math" and "math/big".
|
|
||||||
// ReadDir("/go/src/math/big") would return all the files in the
|
|
||||||
// "math/big" package.
|
|
||||||
//
|
|
||||||
func FakeContext(pkgs map[string]map[string]string) *build.Context {
	// clean maps a file name to a package path relative to /go/src;
	// "" denotes the source root itself.
	clean := func(filename string) string {
		f := path.Clean(filepath.ToSlash(filename))
		// Removing "/go/src" while respecting segment
		// boundaries has this unfortunate corner case:
		if f == "/go/src" {
			return ""
		}
		return strings.TrimPrefix(f, "/go/src/")
	}

	ctxt := build.Default // copy
	ctxt.GOROOT = "/go"
	ctxt.GOPATH = ""
	ctxt.Compiler = "gc"
	ctxt.IsDir = func(dir string) bool {
		dir = clean(dir)
		if dir == "" {
			return true // needed by (*build.Context).SrcDirs
		}
		return pkgs[dir] != nil
	}
	ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
		dir = clean(dir)
		var fis []os.FileInfo
		if dir == "" {
			// enumerate packages
			for importPath := range pkgs {
				fis = append(fis, fakeDirInfo(importPath))
			}
		} else {
			// enumerate files of package
			for basename := range pkgs[dir] {
				fis = append(fis, fakeFileInfo(basename))
			}
		}
		// Map iteration order is random; sort for determinism.
		sort.Sort(byName(fis))
		return fis, nil
	}
	ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
		filename = clean(filename)
		dir, base := path.Split(filename)
		content, ok := pkgs[path.Clean(dir)][base]
		if !ok {
			return nil, fmt.Errorf("file not found: %s", filename)
		}
		return ioutil.NopCloser(strings.NewReader(content)), nil
	}
	ctxt.IsAbsPath = func(path string) bool {
		path = filepath.ToSlash(path)
		// Don't rely on the default (filepath.Path) since on
		// Windows, it reports virtual paths as non-absolute.
		return strings.HasPrefix(path, "/")
	}
	return &ctxt
}
|
|
||||||
|
|
||||||
// byName sorts a []os.FileInfo lexically by file name (sort.Interface).
type byName []os.FileInfo

func (s byName) Len() int           { return len(s) }
func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
|
|
||||||
|
|
||||||
// fakeFileInfo is an os.FileInfo describing a fake regular file;
// its underlying string is the file's base name.
type fakeFileInfo string

func (fi fakeFileInfo) Name() string    { return string(fi) }
func (fakeFileInfo) Sys() interface{}   { return nil }
func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
func (fakeFileInfo) IsDir() bool        { return false }
func (fakeFileInfo) Size() int64        { return 0 }
func (fakeFileInfo) Mode() os.FileMode  { return 0644 }
|
|
||||||
|
|
||||||
// fakeDirInfo is an os.FileInfo describing a fake directory;
// its underlying string is the directory's base name.
type fakeDirInfo string

func (fd fakeDirInfo) Name() string    { return string(fd) }
func (fakeDirInfo) Sys() interface{}   { return nil }
func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
func (fakeDirInfo) IsDir() bool        { return true }
func (fakeDirInfo) Size() int64        { return 0 }
func (fakeDirInfo) Mode() os.FileMode  { return 0755 }
|
|
103
vendor/golang.org/x/tools/go/buildutil/overlay.go
generated
vendored
103
vendor/golang.org/x/tools/go/buildutil/overlay.go
generated
vendored
|
@ -1,103 +0,0 @@
|
||||||
// Copyright 2016 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package buildutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"go/build"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"path/filepath"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// OverlayContext overlays a build.Context with additional files from
|
|
||||||
// a map. Files in the map take precedence over other files.
|
|
||||||
//
|
|
||||||
// In addition to plain string comparison, two file names are
|
|
||||||
// considered equal if their base names match and their directory
|
|
||||||
// components point at the same directory on the file system. That is,
|
|
||||||
// symbolic links are followed for directories, but not files.
|
|
||||||
//
|
|
||||||
// A common use case for OverlayContext is to allow editors to pass in
|
|
||||||
// a set of unsaved, modified files.
|
|
||||||
//
|
|
||||||
// Currently, only the Context.OpenFile function will respect the
|
|
||||||
// overlay. This may change in the future.
|
|
||||||
func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context {
|
|
||||||
// TODO(dominikh): Implement IsDir, HasSubdir and ReadDir
|
|
||||||
|
|
||||||
rc := func(data []byte) (io.ReadCloser, error) {
|
|
||||||
return ioutil.NopCloser(bytes.NewBuffer(data)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
copy := *orig // make a copy
|
|
||||||
ctxt := ©
|
|
||||||
ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
|
|
||||||
// Fast path: names match exactly.
|
|
||||||
if content, ok := overlay[path]; ok {
|
|
||||||
return rc(content)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Slow path: check for same file under a different
|
|
||||||
// alias, perhaps due to a symbolic link.
|
|
||||||
for filename, content := range overlay {
|
|
||||||
if sameFile(path, filename) {
|
|
||||||
return rc(content)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return OpenFile(orig, path)
|
|
||||||
}
|
|
||||||
return ctxt
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseOverlayArchive parses an archive containing Go files and their
|
|
||||||
// contents. The result is intended to be used with OverlayContext.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Archive format
|
|
||||||
//
|
|
||||||
// The archive consists of a series of files. Each file consists of a
|
|
||||||
// name, a decimal file size and the file contents, separated by
|
|
||||||
// newlines. No newline follows after the file contents.
|
|
||||||
func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
|
|
||||||
overlay := make(map[string][]byte)
|
|
||||||
r := bufio.NewReader(archive)
|
|
||||||
for {
|
|
||||||
// Read file name.
|
|
||||||
filename, err := r.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
if err == io.EOF {
|
|
||||||
break // OK
|
|
||||||
}
|
|
||||||
return nil, fmt.Errorf("reading archive file name: %v", err)
|
|
||||||
}
|
|
||||||
filename = filepath.Clean(strings.TrimSpace(filename))
|
|
||||||
|
|
||||||
// Read file size.
|
|
||||||
sz, err := r.ReadString('\n')
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err)
|
|
||||||
}
|
|
||||||
sz = strings.TrimSpace(sz)
|
|
||||||
size, err := strconv.ParseUint(sz, 10, 32)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read file content.
|
|
||||||
content := make([]byte, size)
|
|
||||||
if _, err := io.ReadFull(r, content); err != nil {
|
|
||||||
return nil, fmt.Errorf("reading archive file %s: %v", filename, err)
|
|
||||||
}
|
|
||||||
overlay[filename] = content
|
|
||||||
}
|
|
||||||
|
|
||||||
return overlay, nil
|
|
||||||
}
|
|
75
vendor/golang.org/x/tools/go/buildutil/tags.go
generated
vendored
75
vendor/golang.org/x/tools/go/buildutil/tags.go
generated
vendored
|
@ -1,75 +0,0 @@
|
||||||
package buildutil
|
|
||||||
|
|
||||||
// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// TagsFlagDoc is a usage string for a -tags flag, suitable for
// passing to flag.Var together with a TagsFlag value.
const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
	"For more information about build tags, see the description of " +
	"build constraints in the documentation for the go/build package"
|
|
||||||
|
|
||||||
// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
|
|
||||||
// a flag value in the same manner as go build's -tags flag and
|
|
||||||
// populates a []string slice.
|
|
||||||
//
|
|
||||||
// See $GOROOT/src/go/build/doc.go for description of build tags.
|
|
||||||
// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
|
|
||||||
//
|
|
||||||
// Example:
|
|
||||||
// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
|
|
||||||
type TagsFlag []string // build tags in the order they appeared on the command line
|
|
||||||
|
|
||||||
func (v *TagsFlag) Set(s string) error {
|
|
||||||
var err error
|
|
||||||
*v, err = splitQuotedFields(s)
|
|
||||||
if *v == nil {
|
|
||||||
*v = []string{}
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get implements flag.Getter; it returns the current tag list.
func (v *TagsFlag) Get() interface{} { return *v }
|
|
||||||
|
|
||||||
func splitQuotedFields(s string) ([]string, error) {
|
|
||||||
// Split fields allowing '' or "" around elements.
|
|
||||||
// Quotes further inside the string do not count.
|
|
||||||
var f []string
|
|
||||||
for len(s) > 0 {
|
|
||||||
for len(s) > 0 && isSpaceByte(s[0]) {
|
|
||||||
s = s[1:]
|
|
||||||
}
|
|
||||||
if len(s) == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// Accepted quoted string. No unescaping inside.
|
|
||||||
if s[0] == '"' || s[0] == '\'' {
|
|
||||||
quote := s[0]
|
|
||||||
s = s[1:]
|
|
||||||
i := 0
|
|
||||||
for i < len(s) && s[i] != quote {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
if i >= len(s) {
|
|
||||||
return nil, fmt.Errorf("unterminated %c string", quote)
|
|
||||||
}
|
|
||||||
f = append(f, s[:i])
|
|
||||||
s = s[i+1:]
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
i := 0
|
|
||||||
for i < len(s) && !isSpaceByte(s[i]) {
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
f = append(f, s[:i])
|
|
||||||
s = s[i:]
|
|
||||||
}
|
|
||||||
return f, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// String implements flag.Value. It returns a fixed placeholder
// rather than rendering the actual tag list.
func (v *TagsFlag) String() string {
	return "<tagsFlag>"
}
|
|
||||||
|
|
||||||
// isSpaceByte reports whether c is an ASCII space, tab, newline,
// or carriage return.
func isSpaceByte(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
|
|
212
vendor/golang.org/x/tools/go/buildutil/util.go
generated
vendored
212
vendor/golang.org/x/tools/go/buildutil/util.go
generated
vendored
|
@ -1,212 +0,0 @@
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package buildutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/build"
|
|
||||||
"go/parser"
|
|
||||||
"go/token"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ParseFile behaves like parser.ParseFile,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
//
|
|
||||||
// If file is not absolute (as defined by IsAbsPath), the (dir, file)
|
|
||||||
// components are joined using JoinPath; dir must be absolute.
|
|
||||||
//
|
|
||||||
// The displayPath function, if provided, is used to transform the
|
|
||||||
// filename that will be attached to the ASTs.
|
|
||||||
//
|
|
||||||
// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
|
|
||||||
//
|
|
||||||
func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
|
|
||||||
if !IsAbsPath(ctxt, file) {
|
|
||||||
file = JoinPath(ctxt, dir, file)
|
|
||||||
}
|
|
||||||
rd, err := OpenFile(ctxt, file)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer rd.Close() // ignore error
|
|
||||||
if displayPath != nil {
|
|
||||||
file = displayPath(file)
|
|
||||||
}
|
|
||||||
return parser.ParseFile(fset, file, rd, mode)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ContainingPackage returns the package containing filename.
|
|
||||||
//
|
|
||||||
// If filename is not absolute, it is interpreted relative to working directory dir.
|
|
||||||
// All I/O is via the build context's file system interface, if any.
|
|
||||||
//
|
|
||||||
// The '...Files []string' fields of the resulting build.Package are not
|
|
||||||
// populated (build.FindOnly mode).
|
|
||||||
//
|
|
||||||
func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
|
|
||||||
if !IsAbsPath(ctxt, filename) {
|
|
||||||
filename = JoinPath(ctxt, dir, filename)
|
|
||||||
}
|
|
||||||
|
|
||||||
// We must not assume the file tree uses
|
|
||||||
// "/" always,
|
|
||||||
// `\` always,
|
|
||||||
// or os.PathSeparator (which varies by platform),
|
|
||||||
// but to make any progress, we are forced to assume that
|
|
||||||
// paths will not use `\` unless the PathSeparator
|
|
||||||
// is also `\`, thus we can rely on filepath.ToSlash for some sanity.
|
|
||||||
|
|
||||||
dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"
|
|
||||||
|
|
||||||
// We assume that no source root (GOPATH[i] or GOROOT) contains any other.
|
|
||||||
for _, srcdir := range ctxt.SrcDirs() {
|
|
||||||
srcdirSlash := filepath.ToSlash(srcdir) + "/"
|
|
||||||
if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok {
|
|
||||||
return ctxt.Import(importPath, dir, build.FindOnly)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil, fmt.Errorf("can't find package containing %s", filename)
|
|
||||||
}
|
|
||||||
|
|
||||||
// -- Effective methods of file system interface -------------------------
|
|
||||||
|
|
||||||
// (go/build.Context defines these as methods, but does not export them.)
|
|
||||||
|
|
||||||
// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
|
|
||||||
// the local file system to answer the question.
|
|
||||||
func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
|
|
||||||
if f := ctxt.HasSubdir; f != nil {
|
|
||||||
return f(root, dir)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try using paths we received.
|
|
||||||
if rel, ok = hasSubdir(root, dir); ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Try expanding symlinks and comparing
|
|
||||||
// expanded against unexpanded and
|
|
||||||
// expanded against expanded.
|
|
||||||
rootSym, _ := filepath.EvalSymlinks(root)
|
|
||||||
dirSym, _ := filepath.EvalSymlinks(dir)
|
|
||||||
|
|
||||||
if rel, ok = hasSubdir(rootSym, dir); ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if rel, ok = hasSubdir(root, dirSym); ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return hasSubdir(rootSym, dirSym)
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasSubdir(root, dir string) (rel string, ok bool) {
|
|
||||||
const sep = string(filepath.Separator)
|
|
||||||
root = filepath.Clean(root)
|
|
||||||
if !strings.HasSuffix(root, sep) {
|
|
||||||
root += sep
|
|
||||||
}
|
|
||||||
|
|
||||||
dir = filepath.Clean(dir)
|
|
||||||
if !strings.HasPrefix(dir, root) {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
|
|
||||||
return filepath.ToSlash(dir[len(root):]), true
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileExists returns true if the specified file exists,
|
|
||||||
// using the build context's file system interface.
|
|
||||||
func FileExists(ctxt *build.Context, path string) bool {
|
|
||||||
if ctxt.OpenFile != nil {
|
|
||||||
r, err := ctxt.OpenFile(path)
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
r.Close() // ignore error
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
_, err := os.Stat(path)
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// OpenFile behaves like os.Open,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
|
|
||||||
if ctxt.OpenFile != nil {
|
|
||||||
return ctxt.OpenFile(path)
|
|
||||||
}
|
|
||||||
return os.Open(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsAbsPath behaves like filepath.IsAbs,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func IsAbsPath(ctxt *build.Context, path string) bool {
|
|
||||||
if ctxt.IsAbsPath != nil {
|
|
||||||
return ctxt.IsAbsPath(path)
|
|
||||||
}
|
|
||||||
return filepath.IsAbs(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// JoinPath behaves like filepath.Join,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func JoinPath(ctxt *build.Context, path ...string) string {
|
|
||||||
if ctxt.JoinPath != nil {
|
|
||||||
return ctxt.JoinPath(path...)
|
|
||||||
}
|
|
||||||
return filepath.Join(path...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// IsDir behaves like os.Stat plus IsDir,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func IsDir(ctxt *build.Context, path string) bool {
|
|
||||||
if ctxt.IsDir != nil {
|
|
||||||
return ctxt.IsDir(path)
|
|
||||||
}
|
|
||||||
fi, err := os.Stat(path)
|
|
||||||
return err == nil && fi.IsDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadDir behaves like ioutil.ReadDir,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
|
|
||||||
if ctxt.ReadDir != nil {
|
|
||||||
return ctxt.ReadDir(path)
|
|
||||||
}
|
|
||||||
return ioutil.ReadDir(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SplitPathList behaves like filepath.SplitList,
|
|
||||||
// but uses the build context's file system interface, if any.
|
|
||||||
func SplitPathList(ctxt *build.Context, s string) []string {
|
|
||||||
if ctxt.SplitPathList != nil {
|
|
||||||
return ctxt.SplitPathList(s)
|
|
||||||
}
|
|
||||||
return filepath.SplitList(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// sameFile returns true if x and y have the same basename and denote
|
|
||||||
// the same file.
|
|
||||||
//
|
|
||||||
func sameFile(x, y string) bool {
|
|
||||||
if path.Clean(x) == path.Clean(y) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
if filepath.Base(x) == filepath.Base(y) { // (optimisation)
|
|
||||||
if xi, err := os.Stat(x); err == nil {
|
|
||||||
if yi, err := os.Stat(y); err == nil {
|
|
||||||
return os.SameFile(xi, yi)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
220
vendor/golang.org/x/tools/go/internal/cgo/cgo.go
generated
vendored
220
vendor/golang.org/x/tools/go/internal/cgo/cgo.go
generated
vendored
|
@ -1,220 +0,0 @@
|
||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package cgo handles cgo preprocessing of files containing `import "C"`.
|
|
||||||
//
|
|
||||||
// DESIGN
|
|
||||||
//
|
|
||||||
// The approach taken is to run the cgo processor on the package's
|
|
||||||
// CgoFiles and parse the output, faking the filenames of the
|
|
||||||
// resulting ASTs so that the synthetic file containing the C types is
|
|
||||||
// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
|
|
||||||
// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
|
|
||||||
// not the names of the actual temporary files.
|
|
||||||
//
|
|
||||||
// The advantage of this approach is its fidelity to 'go build'. The
|
|
||||||
// downside is that the token.Position.Offset for each AST node is
|
|
||||||
// incorrect, being an offset within the temporary file. Line numbers
|
|
||||||
// should still be correct because of the //line comments.
|
|
||||||
//
|
|
||||||
// The logic of this file is mostly plundered from the 'go build'
|
|
||||||
// tool, which also invokes the cgo preprocessor.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// REJECTED ALTERNATIVE
|
|
||||||
//
|
|
||||||
// An alternative approach that we explored is to extend go/types'
|
|
||||||
// Importer mechanism to provide the identity of the importing package
|
|
||||||
// so that each time `import "C"` appears it resolves to a different
|
|
||||||
// synthetic package containing just the objects needed in that case.
|
|
||||||
// The loader would invoke cgo but parse only the cgo_types.go file
|
|
||||||
// defining the package-level objects, discarding the other files
|
|
||||||
// resulting from preprocessing.
|
|
||||||
//
|
|
||||||
// The benefit of this approach would have been that source-level
|
|
||||||
// syntax information would correspond exactly to the original cgo
|
|
||||||
// file, with no preprocessing involved, making source tools like
|
|
||||||
// godoc, guru, and eg happy. However, the approach was rejected
|
|
||||||
// due to the additional complexity it would impose on go/types. (It
|
|
||||||
// made for a beautiful demo, though.)
|
|
||||||
//
|
|
||||||
// cgo files, despite their *.go extension, are not legal Go source
|
|
||||||
// files per the specification since they may refer to unexported
|
|
||||||
// members of package "C" such as C.int. Also, a function such as
|
|
||||||
// C.getpwent has in effect two types, one matching its C type and one
|
|
||||||
// which additionally returns (errno C.int). The cgo preprocessor
|
|
||||||
// uses name mangling to distinguish these two functions in the
|
|
||||||
// processed code, but go/types would need to duplicate this logic in
|
|
||||||
// its handling of function calls, analogous to the treatment of map
|
|
||||||
// lookups in which y=m[k] and y,ok=m[k] are both legal.
|
|
||||||
|
|
||||||
package cgo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/build"
|
|
||||||
"go/parser"
|
|
||||||
"go/token"
|
|
||||||
"io/ioutil"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
|
|
||||||
// the output and returns the resulting ASTs.
|
|
||||||
//
|
|
||||||
func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
|
|
||||||
tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(tmpdir)
|
|
||||||
|
|
||||||
pkgdir := bp.Dir
|
|
||||||
if DisplayPath != nil {
|
|
||||||
pkgdir = DisplayPath(pkgdir)
|
|
||||||
}
|
|
||||||
|
|
||||||
cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var files []*ast.File
|
|
||||||
for i := range cgoFiles {
|
|
||||||
rd, err := os.Open(cgoFiles[i])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
|
|
||||||
f, err := parser.ParseFile(fset, display, rd, mode)
|
|
||||||
rd.Close()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
files = append(files, f)
|
|
||||||
}
|
|
||||||
return files, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var cgoRe = regexp.MustCompile(`[/\\:]`)
|
|
||||||
|
|
||||||
// Run invokes the cgo preprocessor on bp.CgoFiles and returns two
|
|
||||||
// lists of files: the resulting processed files (in temporary
|
|
||||||
// directory tmpdir) and the corresponding names of the unprocessed files.
|
|
||||||
//
|
|
||||||
// Run is adapted from (*builder).cgo in
|
|
||||||
// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
|
|
||||||
// Objective C, CGOPKGPATH, CGO_FLAGS.
|
|
||||||
//
|
|
||||||
// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in
|
|
||||||
// to the cgo preprocessor. This in turn will set the // line comments
|
|
||||||
// referring to those files to use absolute paths. This is needed for
|
|
||||||
// go/packages using the legacy go list support so it is able to find
|
|
||||||
// the original files.
|
|
||||||
func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) {
|
|
||||||
cgoCPPFLAGS, _, _, _ := cflags(bp, true)
|
|
||||||
_, cgoexeCFLAGS, _, _ := cflags(bp, false)
|
|
||||||
|
|
||||||
if len(bp.CgoPkgConfig) > 0 {
|
|
||||||
pcCFLAGS, err := pkgConfigFlags(bp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Allows including _cgo_export.h from .[ch] files in the package.
|
|
||||||
cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
|
|
||||||
|
|
||||||
// _cgo_gotypes.go (displayed "C") contains the type definitions.
|
|
||||||
files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
|
|
||||||
displayFiles = append(displayFiles, "C")
|
|
||||||
for _, fn := range bp.CgoFiles {
|
|
||||||
// "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
|
|
||||||
f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
|
|
||||||
files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
|
|
||||||
displayFiles = append(displayFiles, fn)
|
|
||||||
}
|
|
||||||
|
|
||||||
var cgoflags []string
|
|
||||||
if bp.Goroot && bp.ImportPath == "runtime/cgo" {
|
|
||||||
cgoflags = append(cgoflags, "-import_runtime_cgo=false")
|
|
||||||
}
|
|
||||||
if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
|
|
||||||
cgoflags = append(cgoflags, "-import_syscall=false")
|
|
||||||
}
|
|
||||||
|
|
||||||
var cgoFiles []string = bp.CgoFiles
|
|
||||||
if useabs {
|
|
||||||
cgoFiles = make([]string, len(bp.CgoFiles))
|
|
||||||
for i := range cgoFiles {
|
|
||||||
cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
args := stringList(
|
|
||||||
"go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
|
|
||||||
cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
|
|
||||||
)
|
|
||||||
if false {
|
|
||||||
log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
|
|
||||||
}
|
|
||||||
cmd := exec.Command(args[0], args[1:]...)
|
|
||||||
cmd.Dir = pkgdir
|
|
||||||
cmd.Stdout = os.Stderr
|
|
||||||
cmd.Stderr = os.Stderr
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return files, displayFiles, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// -- unmodified from 'go build' ---------------------------------------
|
|
||||||
|
|
||||||
// Return the flags to use when invoking the C or C++ compilers, or cgo.
|
|
||||||
func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
|
|
||||||
var defaults string
|
|
||||||
if def {
|
|
||||||
defaults = "-g -O2"
|
|
||||||
}
|
|
||||||
|
|
||||||
cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
|
|
||||||
cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
|
|
||||||
cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
|
|
||||||
ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// envList returns the value of the given environment variable broken
|
|
||||||
// into fields, using the default value when the variable is empty.
|
|
||||||
func envList(key, def string) []string {
|
|
||||||
v := os.Getenv(key)
|
|
||||||
if v == "" {
|
|
||||||
v = def
|
|
||||||
}
|
|
||||||
return strings.Fields(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// stringList's arguments should be a sequence of string or []string values.
|
|
||||||
// stringList flattens them into a single []string.
|
|
||||||
func stringList(args ...interface{}) []string {
|
|
||||||
var x []string
|
|
||||||
for _, arg := range args {
|
|
||||||
switch arg := arg.(type) {
|
|
||||||
case []string:
|
|
||||||
x = append(x, arg...)
|
|
||||||
case string:
|
|
||||||
x = append(x, arg)
|
|
||||||
default:
|
|
||||||
panic("stringList: invalid argument")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return x
|
|
||||||
}
|
|
39
vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
generated
vendored
39
vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
generated
vendored
|
@ -1,39 +0,0 @@
|
||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package cgo
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"go/build"
|
|
||||||
"os/exec"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
|
|
||||||
func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
|
|
||||||
cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
|
|
||||||
out, err := cmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
|
|
||||||
if len(out) > 0 {
|
|
||||||
s = fmt.Sprintf("%s: %s", s, out)
|
|
||||||
}
|
|
||||||
return nil, errors.New(s)
|
|
||||||
}
|
|
||||||
if len(out) > 0 {
|
|
||||||
flags = strings.Fields(string(out))
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// pkgConfigFlags calls pkg-config if needed and returns the cflags
|
|
||||||
// needed to build the package.
|
|
||||||
func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
|
|
||||||
if len(p.CgoPkgConfig) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return pkgConfig("--cflags", p.CgoPkgConfig)
|
|
||||||
}
|
|
117
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
generated
vendored
117
vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
generated
vendored
|
@ -1,117 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package packagesdriver fetches type sizes for go/packages and go/analysis.
|
|
||||||
package packagesdriver
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"go/types"
|
|
||||||
"os/exec"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/tools/internal/gocommand"
|
|
||||||
)
|
|
||||||
|
|
||||||
var debug = false
|
|
||||||
|
|
||||||
func GetSizes(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) {
|
|
||||||
// TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver.
|
|
||||||
const toolPrefix = "GOPACKAGESDRIVER="
|
|
||||||
tool := ""
|
|
||||||
for _, env := range env {
|
|
||||||
if val := strings.TrimPrefix(env, toolPrefix); val != env {
|
|
||||||
tool = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if tool == "" {
|
|
||||||
var err error
|
|
||||||
tool, err = exec.LookPath("gopackagesdriver")
|
|
||||||
if err != nil {
|
|
||||||
// We did not find the driver, so use "go list".
|
|
||||||
tool = "off"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if tool == "off" {
|
|
||||||
return GetSizesGolist(ctx, buildFlags, env, gocmdRunner, dir)
|
|
||||||
}
|
|
||||||
|
|
||||||
req, err := json.Marshal(struct {
|
|
||||||
Command string `json:"command"`
|
|
||||||
Env []string `json:"env"`
|
|
||||||
BuildFlags []string `json:"build_flags"`
|
|
||||||
}{
|
|
||||||
Command: "sizes",
|
|
||||||
Env: env,
|
|
||||||
BuildFlags: buildFlags,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
cmd := exec.CommandContext(ctx, tool)
|
|
||||||
cmd.Dir = dir
|
|
||||||
cmd.Env = env
|
|
||||||
cmd.Stdin = bytes.NewReader(req)
|
|
||||||
cmd.Stdout = buf
|
|
||||||
cmd.Stderr = new(bytes.Buffer)
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
|
|
||||||
}
|
|
||||||
var response struct {
|
|
||||||
// Sizes, if not nil, is the types.Sizes to use when type checking.
|
|
||||||
Sizes *types.StdSizes
|
|
||||||
}
|
|
||||||
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return response.Sizes, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func GetSizesGolist(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) {
|
|
||||||
inv := gocommand.Invocation{
|
|
||||||
Verb: "list",
|
|
||||||
Args: []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"},
|
|
||||||
Env: env,
|
|
||||||
BuildFlags: buildFlags,
|
|
||||||
WorkingDir: dir,
|
|
||||||
}
|
|
||||||
stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
|
|
||||||
var goarch, compiler string
|
|
||||||
if rawErr != nil {
|
|
||||||
if strings.Contains(rawErr.Error(), "cannot find main module") {
|
|
||||||
// User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
|
|
||||||
// TODO(matloob): Is this a problem in practice?
|
|
||||||
inv := gocommand.Invocation{
|
|
||||||
Verb: "env",
|
|
||||||
Args: []string{"GOARCH"},
|
|
||||||
Env: env,
|
|
||||||
WorkingDir: dir,
|
|
||||||
}
|
|
||||||
envout, enverr := gocmdRunner.Run(ctx, inv)
|
|
||||||
if enverr != nil {
|
|
||||||
return nil, enverr
|
|
||||||
}
|
|
||||||
goarch = strings.TrimSpace(envout.String())
|
|
||||||
compiler = "gc"
|
|
||||||
} else {
|
|
||||||
return nil, friendlyErr
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
fields := strings.Fields(stdout.String())
|
|
||||||
if len(fields) < 2 {
|
|
||||||
return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \"<GOARCH> <compiler>\":\nstdout: <<%s>>\nstderr: <<%s>>",
|
|
||||||
stdout.String(), stderr.String())
|
|
||||||
}
|
|
||||||
goarch = fields[0]
|
|
||||||
compiler = fields[1]
|
|
||||||
}
|
|
||||||
return types.SizesFor(compiler, goarch), nil
|
|
||||||
}
|
|
204
vendor/golang.org/x/tools/go/loader/doc.go
generated
vendored
204
vendor/golang.org/x/tools/go/loader/doc.go
generated
vendored
|
@ -1,204 +0,0 @@
|
||||||
// Copyright 2015 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package loader loads a complete Go program from source code, parsing
|
|
||||||
// and type-checking the initial packages plus their transitive closure
|
|
||||||
// of dependencies. The ASTs and the derived facts are retained for
|
|
||||||
// later use.
|
|
||||||
//
|
|
||||||
// Deprecated: This is an older API and does not have support
|
|
||||||
// for modules. Use golang.org/x/tools/go/packages instead.
|
|
||||||
//
|
|
||||||
// The package defines two primary types: Config, which specifies a
|
|
||||||
// set of initial packages to load and various other options; and
|
|
||||||
// Program, which is the result of successfully loading the packages
|
|
||||||
// specified by a configuration.
|
|
||||||
//
|
|
||||||
// The configuration can be set directly, but *Config provides various
|
|
||||||
// convenience methods to simplify the common cases, each of which can
|
|
||||||
// be called any number of times. Finally, these are followed by a
|
|
||||||
// call to Load() to actually load and type-check the program.
|
|
||||||
//
|
|
||||||
// var conf loader.Config
|
|
||||||
//
|
|
||||||
// // Use the command-line arguments to specify
|
|
||||||
// // a set of initial packages to load from source.
|
|
||||||
// // See FromArgsUsage for help.
|
|
||||||
// rest, err := conf.FromArgs(os.Args[1:], wantTests)
|
|
||||||
//
|
|
||||||
// // Parse the specified files and create an ad hoc package with path "foo".
|
|
||||||
// // All files must have the same 'package' declaration.
|
|
||||||
// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
|
|
||||||
//
|
|
||||||
// // Create an ad hoc package with path "foo" from
|
|
||||||
// // the specified already-parsed files.
|
|
||||||
// // All ASTs must have the same 'package' declaration.
|
|
||||||
// conf.CreateFromFiles("foo", parsedFiles)
|
|
||||||
//
|
|
||||||
// // Add "runtime" to the set of packages to be loaded.
|
|
||||||
// conf.Import("runtime")
|
|
||||||
//
|
|
||||||
// // Adds "fmt" and "fmt_test" to the set of packages
|
|
||||||
// // to be loaded. "fmt" will include *_test.go files.
|
|
||||||
// conf.ImportWithTests("fmt")
|
|
||||||
//
|
|
||||||
// // Finally, load all the packages specified by the configuration.
|
|
||||||
// prog, err := conf.Load()
|
|
||||||
//
|
|
||||||
// See examples_test.go for examples of API usage.
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// CONCEPTS AND TERMINOLOGY
|
|
||||||
//
|
|
||||||
// The WORKSPACE is the set of packages accessible to the loader. The
|
|
||||||
// workspace is defined by Config.Build, a *build.Context. The
|
|
||||||
// default context treats subdirectories of $GOROOT and $GOPATH as
|
|
||||||
// packages, but this behavior may be overridden.
|
|
||||||
//
|
|
||||||
// An AD HOC package is one specified as a set of source files on the
|
|
||||||
// command line. In the simplest case, it may consist of a single file
|
|
||||||
// such as $GOROOT/src/net/http/triv.go.
|
|
||||||
//
|
|
||||||
// EXTERNAL TEST packages are those comprised of a set of *_test.go
|
|
||||||
// files all with the same 'package foo_test' declaration, all in the
|
|
||||||
// same directory. (go/build.Package calls these files XTestFiles.)
|
|
||||||
//
|
|
||||||
// An IMPORTABLE package is one that can be referred to by some import
|
|
||||||
// spec. Every importable package is uniquely identified by its
|
|
||||||
// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json",
|
|
||||||
// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path
|
|
||||||
// typically denotes a subdirectory of the workspace.
|
|
||||||
//
|
|
||||||
// An import declaration uses an IMPORT PATH to refer to a package.
|
|
||||||
// Most import declarations use the package path as the import path.
|
|
||||||
//
|
|
||||||
// Due to VENDORING (https://golang.org/s/go15vendor), the
|
|
||||||
// interpretation of an import path may depend on the directory in which
|
|
||||||
// it appears. To resolve an import path to a package path, go/build
|
|
||||||
// must search the enclosing directories for a subdirectory named
|
|
||||||
// "vendor".
|
|
||||||
//
|
|
||||||
// ad hoc packages and external test packages are NON-IMPORTABLE. The
|
|
||||||
// path of an ad hoc package is inferred from the package
|
|
||||||
// declarations of its files and is therefore not a unique package key.
|
|
||||||
// For example, Config.CreatePkgs may specify two initial ad hoc
|
|
||||||
// packages, both with path "main".
|
|
||||||
//
|
|
||||||
// An AUGMENTED package is an importable package P plus all the
|
|
||||||
// *_test.go files with same 'package foo' declaration as P.
|
|
||||||
// (go/build.Package calls these files TestFiles.)
|
|
||||||
//
|
|
||||||
// The INITIAL packages are those specified in the configuration. A
|
|
||||||
// DEPENDENCY is a package loaded to satisfy an import in an initial
|
|
||||||
// package or another dependency.
|
|
||||||
//
|
|
||||||
package loader
|
|
||||||
|
|
||||||
// IMPLEMENTATION NOTES
|
|
||||||
//
|
|
||||||
// 'go test', in-package test files, and import cycles
|
|
||||||
// ---------------------------------------------------
|
|
||||||
//
|
|
||||||
// An external test package may depend upon members of the augmented
|
|
||||||
// package that are not in the unaugmented package, such as functions
|
|
||||||
// that expose internals. (See bufio/export_test.go for an example.)
|
|
||||||
// So, the loader must ensure that for each external test package
|
|
||||||
// it loads, it also augments the corresponding non-test package.
|
|
||||||
//
|
|
||||||
// The import graph over n unaugmented packages must be acyclic; the
|
|
||||||
// import graph over n-1 unaugmented packages plus one augmented
|
|
||||||
// package must also be acyclic. ('go test' relies on this.) But the
|
|
||||||
// import graph over n augmented packages may contain cycles.
|
|
||||||
//
|
|
||||||
// First, all the (unaugmented) non-test packages and their
|
|
||||||
// dependencies are imported in the usual way; the loader reports an
|
|
||||||
// error if it detects an import cycle.
|
|
||||||
//
|
|
||||||
// Then, each package P for which testing is desired is augmented by
|
|
||||||
// the list P' of its in-package test files, by calling
|
|
||||||
// (*types.Checker).Files. This arrangement ensures that P' may
|
|
||||||
// reference definitions within P, but P may not reference definitions
|
|
||||||
// within P'. Furthermore, P' may import any other package, including
|
|
||||||
// ones that depend upon P, without an import cycle error.
|
|
||||||
//
|
|
||||||
// Consider two packages A and B, both of which have lists of
|
|
||||||
// in-package test files we'll call A' and B', and which have the
|
|
||||||
// following import graph edges:
|
|
||||||
// B imports A
|
|
||||||
// B' imports A
|
|
||||||
// A' imports B
|
|
||||||
// This last edge would be expected to create an error were it not
|
|
||||||
// for the special type-checking discipline above.
|
|
||||||
// Cycles of size greater than two are possible. For example:
|
|
||||||
// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil"
|
|
||||||
// io/ioutil/tempfile_test.go (package ioutil) imports "regexp"
|
|
||||||
// regexp/exec_test.go (package regexp) imports "compress/bzip2"
|
|
||||||
//
|
|
||||||
//
|
|
||||||
// Concurrency
|
|
||||||
// -----------
|
|
||||||
//
|
|
||||||
// Let us define the import dependency graph as follows. Each node is a
|
|
||||||
// list of files passed to (Checker).Files at once. Many of these lists
|
|
||||||
// are the production code of an importable Go package, so those nodes
|
|
||||||
// are labelled by the package's path. The remaining nodes are
|
|
||||||
// ad hoc packages and lists of in-package *_test.go files that augment
|
|
||||||
// an importable package; those nodes have no label.
|
|
||||||
//
|
|
||||||
// The edges of the graph represent import statements appearing within a
|
|
||||||
// file. An edge connects a node (a list of files) to the node it
|
|
||||||
// imports, which is importable and thus always labelled.
|
|
||||||
//
|
|
||||||
// Loading is controlled by this dependency graph.
|
|
||||||
//
|
|
||||||
// To reduce I/O latency, we start loading a package's dependencies
|
|
||||||
// asynchronously as soon as we've parsed its files and enumerated its
|
|
||||||
// imports (scanImports). This performs a preorder traversal of the
|
|
||||||
// import dependency graph.
|
|
||||||
//
|
|
||||||
// To exploit hardware parallelism, we type-check unrelated packages in
|
|
||||||
// parallel, where "unrelated" means not ordered by the partial order of
|
|
||||||
// the import dependency graph.
|
|
||||||
//
|
|
||||||
// We use a concurrency-safe non-blocking cache (importer.imported) to
|
|
||||||
// record the results of type-checking, whether success or failure. An
|
|
||||||
// entry is created in this cache by startLoad the first time the
|
|
||||||
// package is imported. The first goroutine to request an entry becomes
|
|
||||||
// responsible for completing the task and broadcasting completion to
|
|
||||||
// subsequent requestors, which block until then.
|
|
||||||
//
|
|
||||||
// Type checking occurs in (parallel) postorder: we cannot type-check a
|
|
||||||
// set of files until we have loaded and type-checked all of their
|
|
||||||
// immediate dependencies (and thus all of their transitive
|
|
||||||
// dependencies). If the input were guaranteed free of import cycles,
|
|
||||||
// this would be trivial: we could simply wait for completion of the
|
|
||||||
// dependencies and then invoke the typechecker.
|
|
||||||
//
|
|
||||||
// But as we saw in the 'go test' section above, some cycles in the
|
|
||||||
// import graph over packages are actually legal, so long as the
|
|
||||||
// cycle-forming edge originates in the in-package test files that
|
|
||||||
// augment the package. This explains why the nodes of the import
|
|
||||||
// dependency graph are not packages, but lists of files: the unlabelled
|
|
||||||
// nodes avoid the cycles. Consider packages A and B where B imports A
|
|
||||||
// and A's in-package tests AT import B. The naively constructed import
|
|
||||||
// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
|
|
||||||
// the graph over lists of files is AT --> B --> A, where AT is an
|
|
||||||
// unlabelled node.
|
|
||||||
//
|
|
||||||
// Awaiting completion of the dependencies in a cyclic graph would
|
|
||||||
// deadlock, so we must materialize the import dependency graph (as
|
|
||||||
// importer.graph) and check whether each import edge forms a cycle. If
|
|
||||||
// x imports y, and the graph already contains a path from y to x, then
|
|
||||||
// there is an import cycle, in which case the processing of x must not
|
|
||||||
// wait for the completion of processing of y.
|
|
||||||
//
|
|
||||||
// When the type-checker makes a callback (doImport) to the loader for a
|
|
||||||
// given import edge, there are two possible cases. In the normal case,
|
|
||||||
// the dependency has already been completely type-checked; doImport
|
|
||||||
// does a cache lookup and returns it. In the cyclic case, the entry in
|
|
||||||
// the cache is still necessarily incomplete, indicating a cycle. We
|
|
||||||
// perform the cycle check again to obtain the error message, and return
|
|
||||||
// the error.
|
|
||||||
//
|
|
||||||
// The result of using concurrency is about a 2.5x speedup for stdlib_test.
|
|
1086
vendor/golang.org/x/tools/go/loader/loader.go
generated
vendored
1086
vendor/golang.org/x/tools/go/loader/loader.go
generated
vendored
File diff suppressed because it is too large
Load diff
124
vendor/golang.org/x/tools/go/loader/util.go
generated
vendored
124
vendor/golang.org/x/tools/go/loader/util.go
generated
vendored
|
@ -1,124 +0,0 @@
|
||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package loader
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/ast"
|
|
||||||
"go/build"
|
|
||||||
"go/parser"
|
|
||||||
"go/token"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"strconv"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/buildutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// We use a counting semaphore to limit
|
|
||||||
// the number of parallel I/O calls per process.
|
|
||||||
var ioLimit = make(chan bool, 10)
|
|
||||||
|
|
||||||
// parseFiles parses the Go source files within directory dir and
|
|
||||||
// returns the ASTs of the ones that could be at least partially parsed,
|
|
||||||
// along with a list of I/O and parse errors encountered.
|
|
||||||
//
|
|
||||||
// I/O is done via ctxt, which may specify a virtual file system.
|
|
||||||
// displayPath is used to transform the filenames attached to the ASTs.
|
|
||||||
//
|
|
||||||
func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
|
|
||||||
if displayPath == nil {
|
|
||||||
displayPath = func(path string) string { return path }
|
|
||||||
}
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
n := len(files)
|
|
||||||
parsed := make([]*ast.File, n)
|
|
||||||
errors := make([]error, n)
|
|
||||||
for i, file := range files {
|
|
||||||
if !buildutil.IsAbsPath(ctxt, file) {
|
|
||||||
file = buildutil.JoinPath(ctxt, dir, file)
|
|
||||||
}
|
|
||||||
wg.Add(1)
|
|
||||||
go func(i int, file string) {
|
|
||||||
ioLimit <- true // wait
|
|
||||||
defer func() {
|
|
||||||
wg.Done()
|
|
||||||
<-ioLimit // signal
|
|
||||||
}()
|
|
||||||
var rd io.ReadCloser
|
|
||||||
var err error
|
|
||||||
if ctxt.OpenFile != nil {
|
|
||||||
rd, err = ctxt.OpenFile(file)
|
|
||||||
} else {
|
|
||||||
rd, err = os.Open(file)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
errors[i] = err // open failed
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseFile may return both an AST and an error.
|
|
||||||
parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
|
|
||||||
rd.Close()
|
|
||||||
}(i, file)
|
|
||||||
}
|
|
||||||
wg.Wait()
|
|
||||||
|
|
||||||
// Eliminate nils, preserving order.
|
|
||||||
var o int
|
|
||||||
for _, f := range parsed {
|
|
||||||
if f != nil {
|
|
||||||
parsed[o] = f
|
|
||||||
o++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
parsed = parsed[:o]
|
|
||||||
|
|
||||||
o = 0
|
|
||||||
for _, err := range errors {
|
|
||||||
if err != nil {
|
|
||||||
errors[o] = err
|
|
||||||
o++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
errors = errors[:o]
|
|
||||||
|
|
||||||
return parsed, errors
|
|
||||||
}
|
|
||||||
|
|
||||||
// scanImports returns the set of all import paths from all
|
|
||||||
// import specs in the specified files.
|
|
||||||
func scanImports(files []*ast.File) map[string]bool {
|
|
||||||
imports := make(map[string]bool)
|
|
||||||
for _, f := range files {
|
|
||||||
for _, decl := range f.Decls {
|
|
||||||
if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
|
|
||||||
for _, spec := range decl.Specs {
|
|
||||||
spec := spec.(*ast.ImportSpec)
|
|
||||||
|
|
||||||
// NB: do not assume the program is well-formed!
|
|
||||||
path, err := strconv.Unquote(spec.Path.Value)
|
|
||||||
if err != nil {
|
|
||||||
continue // quietly ignore the error
|
|
||||||
}
|
|
||||||
if path == "C" {
|
|
||||||
continue // skip pseudopackage
|
|
||||||
}
|
|
||||||
imports[path] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return imports
|
|
||||||
}
|
|
||||||
|
|
||||||
// ---------- Internal helpers ----------
|
|
||||||
|
|
||||||
// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
|
|
||||||
func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
|
|
||||||
p := int(pos)
|
|
||||||
base := f.Base()
|
|
||||||
return base <= p && p < base+f.Size()
|
|
||||||
}
|
|
221
vendor/golang.org/x/tools/go/packages/doc.go
generated
vendored
221
vendor/golang.org/x/tools/go/packages/doc.go
generated
vendored
|
@ -1,221 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
/*
|
|
||||||
Package packages loads Go packages for inspection and analysis.
|
|
||||||
|
|
||||||
The Load function takes as input a list of patterns and return a list of Package
|
|
||||||
structs describing individual packages matched by those patterns.
|
|
||||||
The LoadMode controls the amount of detail in the loaded packages.
|
|
||||||
|
|
||||||
Load passes most patterns directly to the underlying build tool,
|
|
||||||
but all patterns with the prefix "query=", where query is a
|
|
||||||
non-empty string of letters from [a-z], are reserved and may be
|
|
||||||
interpreted as query operators.
|
|
||||||
|
|
||||||
Two query operators are currently supported: "file" and "pattern".
|
|
||||||
|
|
||||||
The query "file=path/to/file.go" matches the package or packages enclosing
|
|
||||||
the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
|
|
||||||
might return the packages "fmt" and "fmt [fmt.test]".
|
|
||||||
|
|
||||||
The query "pattern=string" causes "string" to be passed directly to
|
|
||||||
the underlying build tool. In most cases this is unnecessary,
|
|
||||||
but an application can use Load("pattern=" + x) as an escaping mechanism
|
|
||||||
to ensure that x is not interpreted as a query operator if it contains '='.
|
|
||||||
|
|
||||||
All other query operators are reserved for future use and currently
|
|
||||||
cause Load to report an error.
|
|
||||||
|
|
||||||
The Package struct provides basic information about the package, including
|
|
||||||
|
|
||||||
- ID, a unique identifier for the package in the returned set;
|
|
||||||
- GoFiles, the names of the package's Go source files;
|
|
||||||
- Imports, a map from source import strings to the Packages they name;
|
|
||||||
- Types, the type information for the package's exported symbols;
|
|
||||||
- Syntax, the parsed syntax trees for the package's source code; and
|
|
||||||
- TypeInfo, the result of a complete type-check of the package syntax trees.
|
|
||||||
|
|
||||||
(See the documentation for type Package for the complete list of fields
|
|
||||||
and more detailed descriptions.)
|
|
||||||
|
|
||||||
For example,
|
|
||||||
|
|
||||||
Load(nil, "bytes", "unicode...")
|
|
||||||
|
|
||||||
returns four Package structs describing the standard library packages
|
|
||||||
bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
|
|
||||||
can match multiple packages and that a package might be matched by
|
|
||||||
multiple patterns: in general it is not possible to determine which
|
|
||||||
packages correspond to which patterns.
|
|
||||||
|
|
||||||
Note that the list returned by Load contains only the packages matched
|
|
||||||
by the patterns. Their dependencies can be found by walking the import
|
|
||||||
graph using the Imports fields.
|
|
||||||
|
|
||||||
The Load function can be configured by passing a pointer to a Config as
|
|
||||||
the first argument. A nil Config is equivalent to the zero Config, which
|
|
||||||
causes Load to run in LoadFiles mode, collecting minimal information.
|
|
||||||
See the documentation for type Config for details.
|
|
||||||
|
|
||||||
As noted earlier, the Config.Mode controls the amount of detail
|
|
||||||
reported about the loaded packages. See the documentation for type LoadMode
|
|
||||||
for details.
|
|
||||||
|
|
||||||
Most tools should pass their command-line arguments (after any flags)
|
|
||||||
uninterpreted to the loader, so that the loader can interpret them
|
|
||||||
according to the conventions of the underlying build system.
|
|
||||||
See the Example function for typical usage.
|
|
||||||
|
|
||||||
*/
|
|
||||||
package packages // import "golang.org/x/tools/go/packages"
|
|
||||||
|
|
||||||
/*
|
|
||||||
|
|
||||||
Motivation and design considerations
|
|
||||||
|
|
||||||
The new package's design solves problems addressed by two existing
|
|
||||||
packages: go/build, which locates and describes packages, and
|
|
||||||
golang.org/x/tools/go/loader, which loads, parses and type-checks them.
|
|
||||||
The go/build.Package structure encodes too much of the 'go build' way
|
|
||||||
of organizing projects, leaving us in need of a data type that describes a
|
|
||||||
package of Go source code independent of the underlying build system.
|
|
||||||
We wanted something that works equally well with go build and vgo, and
|
|
||||||
also other build systems such as Bazel and Blaze, making it possible to
|
|
||||||
construct analysis tools that work in all these environments.
|
|
||||||
Tools such as errcheck and staticcheck were essentially unavailable to
|
|
||||||
the Go community at Google, and some of Google's internal tools for Go
|
|
||||||
are unavailable externally.
|
|
||||||
This new package provides a uniform way to obtain package metadata by
|
|
||||||
querying each of these build systems, optionally supporting their
|
|
||||||
preferred command-line notations for packages, so that tools integrate
|
|
||||||
neatly with users' build environments. The Metadata query function
|
|
||||||
executes an external query tool appropriate to the current workspace.
|
|
||||||
|
|
||||||
Loading packages always returns the complete import graph "all the way down",
|
|
||||||
even if all you want is information about a single package, because the query
|
|
||||||
mechanisms of all the build systems we currently support ({go,vgo} list, and
|
|
||||||
blaze/bazel aspect-based query) cannot provide detailed information
|
|
||||||
about one package without visiting all its dependencies too, so there is
|
|
||||||
no additional asymptotic cost to providing transitive information.
|
|
||||||
(This property might not be true of a hypothetical 5th build system.)
|
|
||||||
|
|
||||||
In calls to TypeCheck, all initial packages, and any package that
|
|
||||||
transitively depends on one of them, must be loaded from source.
|
|
||||||
Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
|
|
||||||
source; D may be loaded from export data, and E may not be loaded at all
|
|
||||||
(though it's possible that D's export data mentions it, so a
|
|
||||||
types.Package may be created for it and exposed.)
|
|
||||||
|
|
||||||
The old loader had a feature to suppress type-checking of function
|
|
||||||
bodies on a per-package basis, primarily intended to reduce the work of
|
|
||||||
obtaining type information for imported packages. Now that imports are
|
|
||||||
satisfied by export data, the optimization no longer seems necessary.
|
|
||||||
|
|
||||||
Despite some early attempts, the old loader did not exploit export data,
|
|
||||||
instead always using the equivalent of WholeProgram mode. This was due
|
|
||||||
to the complexity of mixing source and export data packages (now
|
|
||||||
resolved by the upward traversal mentioned above), and because export data
|
|
||||||
files were nearly always missing or stale. Now that 'go build' supports
|
|
||||||
caching, all the underlying build systems can guarantee to produce
|
|
||||||
export data in a reasonable (amortized) time.
|
|
||||||
|
|
||||||
Test "main" packages synthesized by the build system are now reported as
|
|
||||||
first-class packages, avoiding the need for clients (such as go/ssa) to
|
|
||||||
reinvent this generation logic.
|
|
||||||
|
|
||||||
One way in which go/packages is simpler than the old loader is in its
|
|
||||||
treatment of in-package tests. In-package tests are packages that
|
|
||||||
consist of all the files of the library under test, plus the test files.
|
|
||||||
The old loader constructed in-package tests by a two-phase process of
|
|
||||||
mutation called "augmentation": first it would construct and type check
|
|
||||||
all the ordinary library packages and type-check the packages that
|
|
||||||
depend on them; then it would add more (test) files to the package and
|
|
||||||
type-check again. This two-phase approach had four major problems:
|
|
||||||
1) in processing the tests, the loader modified the library package,
|
|
||||||
leaving no way for a client application to see both the test
|
|
||||||
package and the library package; one would mutate into the other.
|
|
||||||
2) because test files can declare additional methods on types defined in
|
|
||||||
the library portion of the package, the dispatch of method calls in
|
|
||||||
the library portion was affected by the presence of the test files.
|
|
||||||
This should have been a clue that the packages were logically
|
|
||||||
different.
|
|
||||||
3) this model of "augmentation" assumed at most one in-package test
|
|
||||||
per library package, which is true of projects using 'go build',
|
|
||||||
but not other build systems.
|
|
||||||
4) because of the two-phase nature of test processing, all packages that
|
|
||||||
import the library package had to be processed before augmentation,
|
|
||||||
forcing a "one-shot" API and preventing the client from calling Load
|
|
||||||
in several times in sequence as is now possible in WholeProgram mode.
|
|
||||||
(TypeCheck mode has a similar one-shot restriction for a different reason.)
|
|
||||||
|
|
||||||
Early drafts of this package supported "multi-shot" operation.
|
|
||||||
Although it allowed clients to make a sequence of calls (or concurrent
|
|
||||||
calls) to Load, building up the graph of Packages incrementally,
|
|
||||||
it was of marginal value: it complicated the API
|
|
||||||
(since it allowed some options to vary across calls but not others),
|
|
||||||
it complicated the implementation,
|
|
||||||
it cannot be made to work in Types mode, as explained above,
|
|
||||||
and it was less efficient than making one combined call (when this is possible).
|
|
||||||
Among the clients we have inspected, none made multiple calls to load
|
|
||||||
but could not be easily and satisfactorily modified to make only a single call.
|
|
||||||
However, applications changes may be required.
|
|
||||||
For example, the ssadump command loads the user-specified packages
|
|
||||||
and in addition the runtime package. It is tempting to simply append
|
|
||||||
"runtime" to the user-provided list, but that does not work if the user
|
|
||||||
specified an ad-hoc package such as [a.go b.go].
|
|
||||||
Instead, ssadump no longer requests the runtime package,
|
|
||||||
but seeks it among the dependencies of the user-specified packages,
|
|
||||||
and emits an error if it is not found.
|
|
||||||
|
|
||||||
Overlays: The Overlay field in the Config allows providing alternate contents
|
|
||||||
for Go source files, by providing a mapping from file path to contents.
|
|
||||||
go/packages will pull in new imports added in overlay files when go/packages
|
|
||||||
is run in LoadImports mode or greater.
|
|
||||||
Overlay support for the go list driver isn't complete yet: if the file doesn't
|
|
||||||
exist on disk, it will only be recognized in an overlay if it is a non-test file
|
|
||||||
and the package would be reported even without the overlay.
|
|
||||||
|
|
||||||
Questions & Tasks
|
|
||||||
|
|
||||||
- Add GOARCH/GOOS?
|
|
||||||
They are not portable concepts, but could be made portable.
|
|
||||||
Our goal has been to allow users to express themselves using the conventions
|
|
||||||
of the underlying build system: if the build system honors GOARCH
|
|
||||||
during a build and during a metadata query, then so should
|
|
||||||
applications built atop that query mechanism.
|
|
||||||
Conversely, if the target architecture of the build is determined by
|
|
||||||
command-line flags, the application can pass the relevant
|
|
||||||
flags through to the build system using a command such as:
|
|
||||||
myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
|
|
||||||
However, this approach is low-level, unwieldy, and non-portable.
|
|
||||||
GOOS and GOARCH seem important enough to warrant a dedicated option.
|
|
||||||
|
|
||||||
- How should we handle partial failures such as a mixture of good and
|
|
||||||
malformed patterns, existing and non-existent packages, successful and
|
|
||||||
failed builds, import failures, import cycles, and so on, in a call to
|
|
||||||
Load?
|
|
||||||
|
|
||||||
- Support bazel, blaze, and go1.10 list, not just go1.11 list.
|
|
||||||
|
|
||||||
- Handle (and test) various partial success cases, e.g.
|
|
||||||
a mixture of good packages and:
|
|
||||||
invalid patterns
|
|
||||||
nonexistent packages
|
|
||||||
empty packages
|
|
||||||
packages with malformed package or import declarations
|
|
||||||
unreadable files
|
|
||||||
import cycles
|
|
||||||
other parse errors
|
|
||||||
type errors
|
|
||||||
Make sure we record errors at the correct place in the graph.
|
|
||||||
|
|
||||||
- Missing packages among initial arguments are not reported.
|
|
||||||
Return bogus packages for them, like golist does.
|
|
||||||
|
|
||||||
- "undeclared name" errors (for example) are reported out of source file
|
|
||||||
order. I suspect this is due to the breadth-first resolution now used
|
|
||||||
by go/types. Is that a bug? Discuss with gri.
|
|
||||||
|
|
||||||
*/
|
|
101
vendor/golang.org/x/tools/go/packages/external.go
generated
vendored
101
vendor/golang.org/x/tools/go/packages/external.go
generated
vendored
|
@ -1,101 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// This file enables an external tool to intercept package requests.
|
|
||||||
// If the tool is present then its results are used in preference to
|
|
||||||
// the go list command.
|
|
||||||
|
|
||||||
package packages
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// The Driver Protocol
|
|
||||||
//
|
|
||||||
// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
|
|
||||||
// This allows for different build systems to support go/packages by telling go/packages how the
|
|
||||||
// packages' source is organized.
|
|
||||||
// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
|
|
||||||
// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
|
|
||||||
// documentation in doc.go for the full description of the patterns that need to be supported.
|
|
||||||
// A driver receives as a JSON-serialized driverRequest struct in standard input and will
|
|
||||||
// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output.
|
|
||||||
|
|
||||||
// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
|
|
||||||
type driverRequest struct {
|
|
||||||
Mode LoadMode `json:"mode"`
|
|
||||||
// Env specifies the environment the underlying build system should be run in.
|
|
||||||
Env []string `json:"env"`
|
|
||||||
// BuildFlags are flags that should be passed to the underlying build system.
|
|
||||||
BuildFlags []string `json:"build_flags"`
|
|
||||||
// Tests specifies whether the patterns should also return test packages.
|
|
||||||
Tests bool `json:"tests"`
|
|
||||||
// Overlay maps file paths (relative to the driver's working directory) to the byte contents
|
|
||||||
// of overlay files.
|
|
||||||
Overlay map[string][]byte `json:"overlay"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// findExternalDriver returns the file path of a tool that supplies
|
|
||||||
// the build system package structure, or "" if not found."
|
|
||||||
// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
|
|
||||||
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
|
|
||||||
func findExternalDriver(cfg *Config) driver {
|
|
||||||
const toolPrefix = "GOPACKAGESDRIVER="
|
|
||||||
tool := ""
|
|
||||||
for _, env := range cfg.Env {
|
|
||||||
if val := strings.TrimPrefix(env, toolPrefix); val != env {
|
|
||||||
tool = val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if tool != "" && tool == "off" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if tool == "" {
|
|
||||||
var err error
|
|
||||||
tool, err = exec.LookPath("gopackagesdriver")
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return func(cfg *Config, words ...string) (*driverResponse, error) {
|
|
||||||
req, err := json.Marshal(driverRequest{
|
|
||||||
Mode: cfg.Mode,
|
|
||||||
Env: cfg.Env,
|
|
||||||
BuildFlags: cfg.BuildFlags,
|
|
||||||
Tests: cfg.Tests,
|
|
||||||
Overlay: cfg.Overlay,
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
stderr := new(bytes.Buffer)
|
|
||||||
cmd := exec.CommandContext(cfg.Context, tool, words...)
|
|
||||||
cmd.Dir = cfg.Dir
|
|
||||||
cmd.Env = cfg.Env
|
|
||||||
cmd.Stdin = bytes.NewReader(req)
|
|
||||||
cmd.Stdout = buf
|
|
||||||
cmd.Stderr = stderr
|
|
||||||
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
|
|
||||||
}
|
|
||||||
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
|
|
||||||
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
|
|
||||||
}
|
|
||||||
|
|
||||||
var response driverResponse
|
|
||||||
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &response, nil
|
|
||||||
}
|
|
||||||
}
|
|
907
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
907
vendor/golang.org/x/tools/go/packages/golist.go
generated
vendored
|
@ -1,907 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packages
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"context"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"go/types"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"os/exec"
|
|
||||||
"path"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"sync"
|
|
||||||
"unicode"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/internal/packagesdriver"
|
|
||||||
"golang.org/x/tools/internal/gocommand"
|
|
||||||
"golang.org/x/xerrors"
|
|
||||||
)
|
|
||||||
|
|
||||||
// debug controls verbose logging.
|
|
||||||
var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
|
|
||||||
|
|
||||||
// A goTooOldError reports that the go command
|
|
||||||
// found by exec.LookPath is too old to use the new go list behavior.
|
|
||||||
type goTooOldError struct {
|
|
||||||
error
|
|
||||||
}
|
|
||||||
|
|
||||||
// responseDeduper wraps a driverResponse, deduplicating its contents.
|
|
||||||
type responseDeduper struct {
|
|
||||||
seenRoots map[string]bool
|
|
||||||
seenPackages map[string]*Package
|
|
||||||
dr *driverResponse
|
|
||||||
}
|
|
||||||
|
|
||||||
func newDeduper() *responseDeduper {
|
|
||||||
return &responseDeduper{
|
|
||||||
dr: &driverResponse{},
|
|
||||||
seenRoots: map[string]bool{},
|
|
||||||
seenPackages: map[string]*Package{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// addAll fills in r with a driverResponse.
|
|
||||||
func (r *responseDeduper) addAll(dr *driverResponse) {
|
|
||||||
for _, pkg := range dr.Packages {
|
|
||||||
r.addPackage(pkg)
|
|
||||||
}
|
|
||||||
for _, root := range dr.Roots {
|
|
||||||
r.addRoot(root)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *responseDeduper) addPackage(p *Package) {
|
|
||||||
if r.seenPackages[p.ID] != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
r.seenPackages[p.ID] = p
|
|
||||||
r.dr.Packages = append(r.dr.Packages, p)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *responseDeduper) addRoot(id string) {
|
|
||||||
if r.seenRoots[id] {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
r.seenRoots[id] = true
|
|
||||||
r.dr.Roots = append(r.dr.Roots, id)
|
|
||||||
}
|
|
||||||
|
|
||||||
type golistState struct {
|
|
||||||
cfg *Config
|
|
||||||
ctx context.Context
|
|
||||||
|
|
||||||
envOnce sync.Once
|
|
||||||
goEnvError error
|
|
||||||
goEnv map[string]string
|
|
||||||
|
|
||||||
rootsOnce sync.Once
|
|
||||||
rootDirsError error
|
|
||||||
rootDirs map[string]string
|
|
||||||
|
|
||||||
// vendorDirs caches the (non)existence of vendor directories.
|
|
||||||
vendorDirs map[string]bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// getEnv returns Go environment variables. Only specific variables are
|
|
||||||
// populated -- computing all of them is slow.
|
|
||||||
func (state *golistState) getEnv() (map[string]string, error) {
|
|
||||||
state.envOnce.Do(func() {
|
|
||||||
var b *bytes.Buffer
|
|
||||||
b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH")
|
|
||||||
if state.goEnvError != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
state.goEnv = make(map[string]string)
|
|
||||||
decoder := json.NewDecoder(b)
|
|
||||||
if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return state.goEnv, state.goEnvError
|
|
||||||
}
|
|
||||||
|
|
||||||
// mustGetEnv is a convenience function that can be used if getEnv has already succeeded.
|
|
||||||
func (state *golistState) mustGetEnv() map[string]string {
|
|
||||||
env, err := state.getEnv()
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Sprintf("mustGetEnv: %v", err))
|
|
||||||
}
|
|
||||||
return env
|
|
||||||
}
|
|
||||||
|
|
||||||
// goListDriver uses the go list command to interpret the patterns and produce
|
|
||||||
// the build system package structure.
|
|
||||||
// See driver for more details.
|
|
||||||
func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
|
|
||||||
// Make sure that any asynchronous go commands are killed when we return.
|
|
||||||
parentCtx := cfg.Context
|
|
||||||
if parentCtx == nil {
|
|
||||||
parentCtx = context.Background()
|
|
||||||
}
|
|
||||||
ctx, cancel := context.WithCancel(parentCtx)
|
|
||||||
defer cancel()
|
|
||||||
|
|
||||||
response := newDeduper()
|
|
||||||
|
|
||||||
// Fill in response.Sizes asynchronously if necessary.
|
|
||||||
var sizeserr error
|
|
||||||
var sizeswg sync.WaitGroup
|
|
||||||
if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
|
|
||||||
sizeswg.Add(1)
|
|
||||||
go func() {
|
|
||||||
var sizes types.Sizes
|
|
||||||
sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.gocmdRunner, cfg.Dir)
|
|
||||||
// types.SizesFor always returns nil or a *types.StdSizes.
|
|
||||||
response.dr.Sizes, _ = sizes.(*types.StdSizes)
|
|
||||||
sizeswg.Done()
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
state := &golistState{
|
|
||||||
cfg: cfg,
|
|
||||||
ctx: ctx,
|
|
||||||
vendorDirs: map[string]bool{},
|
|
||||||
}
|
|
||||||
|
|
||||||
// Determine files requested in contains patterns
|
|
||||||
var containFiles []string
|
|
||||||
restPatterns := make([]string, 0, len(patterns))
|
|
||||||
// Extract file= and other [querytype]= patterns. Report an error if querytype
|
|
||||||
// doesn't exist.
|
|
||||||
extractQueries:
|
|
||||||
for _, pattern := range patterns {
|
|
||||||
eqidx := strings.Index(pattern, "=")
|
|
||||||
if eqidx < 0 {
|
|
||||||
restPatterns = append(restPatterns, pattern)
|
|
||||||
} else {
|
|
||||||
query, value := pattern[:eqidx], pattern[eqidx+len("="):]
|
|
||||||
switch query {
|
|
||||||
case "file":
|
|
||||||
containFiles = append(containFiles, value)
|
|
||||||
case "pattern":
|
|
||||||
restPatterns = append(restPatterns, value)
|
|
||||||
case "": // not a reserved query
|
|
||||||
restPatterns = append(restPatterns, pattern)
|
|
||||||
default:
|
|
||||||
for _, rune := range query {
|
|
||||||
if rune < 'a' || rune > 'z' { // not a reserved query
|
|
||||||
restPatterns = append(restPatterns, pattern)
|
|
||||||
continue extractQueries
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Reject all other patterns containing "="
|
|
||||||
return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// See if we have any patterns to pass through to go list. Zero initial
|
|
||||||
// patterns also requires a go list call, since it's the equivalent of
|
|
||||||
// ".".
|
|
||||||
if len(restPatterns) > 0 || len(patterns) == 0 {
|
|
||||||
dr, err := state.createDriverResponse(restPatterns...)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
response.addAll(dr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(containFiles) != 0 {
|
|
||||||
if err := state.runContainsQueries(response, containFiles); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
modifiedPkgs, needPkgs, err := state.processGolistOverlay(response)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
var containsCandidates []string
|
|
||||||
if len(containFiles) > 0 {
|
|
||||||
containsCandidates = append(containsCandidates, modifiedPkgs...)
|
|
||||||
containsCandidates = append(containsCandidates, needPkgs...)
|
|
||||||
}
|
|
||||||
if err := state.addNeededOverlayPackages(response, needPkgs); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
// Check candidate packages for containFiles.
|
|
||||||
if len(containFiles) > 0 {
|
|
||||||
for _, id := range containsCandidates {
|
|
||||||
pkg, ok := response.seenPackages[id]
|
|
||||||
if !ok {
|
|
||||||
response.addPackage(&Package{
|
|
||||||
ID: id,
|
|
||||||
Errors: []Error{
|
|
||||||
{
|
|
||||||
Kind: ListError,
|
|
||||||
Msg: fmt.Sprintf("package %s expected but not seen", id),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
})
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, f := range containFiles {
|
|
||||||
for _, g := range pkg.GoFiles {
|
|
||||||
if sameFile(f, g) {
|
|
||||||
response.addRoot(id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
sizeswg.Wait()
|
|
||||||
if sizeserr != nil {
|
|
||||||
return nil, sizeserr
|
|
||||||
}
|
|
||||||
return response.dr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error {
|
|
||||||
if len(pkgs) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
dr, err := state.createDriverResponse(pkgs...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, pkg := range dr.Packages {
|
|
||||||
response.addPackage(pkg)
|
|
||||||
}
|
|
||||||
_, needPkgs, err := state.processGolistOverlay(response)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return state.addNeededOverlayPackages(response, needPkgs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// runContainsQueries handles "file=" queries: for each queried file it runs
// "go list" on the file's directory, merges every returned package into
// response, and marks as roots those root packages whose GoFiles actually
// include the queried file. If directory listing fails (or returns a single
// empty, erroneous package), it falls back to loading the file as an ad-hoc
// package via adhocPackage.
func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
	for _, query := range queries {
		// TODO(matloob): Do only one query per directory.
		fdir := filepath.Dir(query)
		// Pass absolute path of directory to go list so that it knows to treat it as a directory,
		// not a package path.
		pattern, err := filepath.Abs(fdir)
		if err != nil {
			return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
		}
		dirResponse, err := state.createDriverResponse(pattern)

		// If there was an error loading the package, or the package is returned
		// with errors, try to load the file as an ad-hoc package.
		// Usually the error will appear in a returned package, but may not if we're
		// in module mode and the ad-hoc is located outside a module.
		if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
			len(dirResponse.Packages[0].Errors) == 1 {
			var queryErr error
			if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
				return err // return the original error
			}
		}
		isRoot := make(map[string]bool, len(dirResponse.Roots))
		for _, root := range dirResponse.Roots {
			isRoot[root] = true
		}
		for _, pkg := range dirResponse.Packages {
			// Add any new packages to the main set
			// We don't bother to filter packages that will be dropped by the changes of roots,
			// that will happen anyway during graph construction outside this function.
			// Over-reporting packages is not a problem.
			response.addPackage(pkg)
			// if the package was not a root one, it cannot have the file
			if !isRoot[pkg.ID] {
				continue
			}
			// Base-name comparison suffices here: all of pkg's GoFiles live in
			// the directory we just listed, so equal base names imply the same file.
			for _, pkgFile := range pkg.GoFiles {
				if filepath.Base(query) == filepath.Base(pkgFile) {
					response.addRoot(pkg.ID)
					break
				}
			}
		}
	}
	return nil
}
|
|
||||||
|
|
||||||
// adhocPackage attempts to load or construct an ad-hoc package for a given
// query, if the original call to the driver produced inadequate results.
// pattern is the absolute directory of the queried file; query is the file
// path as originally given.
func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) {
	response, err := state.createDriverResponse(query)
	if err != nil {
		return nil, err
	}
	// If we get nothing back from `go list`,
	// try to make this file into its own ad-hoc package.
	// TODO(rstambler): Should this check against the original response?
	if len(response.Packages) == 0 {
		response.Packages = append(response.Packages, &Package{
			ID:              "command-line-arguments",
			PkgPath:         query,
			GoFiles:         []string{query},
			CompiledGoFiles: []string{query},
			Imports:         make(map[string]*Package),
		})
		response.Roots = append(response.Roots, "command-line-arguments")
	}
	// Handle special cases.
	if len(response.Packages) == 1 {
		// golang/go#33482: If this is a file= query for ad-hoc packages where
		// the file only exists on an overlay, and exists outside of a module,
		// add the file to the package and remove the errors.
		if response.Packages[0].ID == "command-line-arguments" ||
			filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) {
			if len(response.Packages[0].GoFiles) == 0 {
				filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
				// TODO(matloob): check if the file is outside of a root dir?
				for path := range state.cfg.Overlay {
					if path == filename {
						response.Packages[0].Errors = nil
						response.Packages[0].GoFiles = []string{path}
						response.Packages[0].CompiledGoFiles = []string{path}
					}
				}
			}
		}
	}
	return response, nil
}
|
|
||||||
|
|
||||||
// jsonPackage is the decoded form of one "go list -json" object.
// Fields must match go list;
// see $GOROOT/src/cmd/go/internal/load/pkg.go.
type jsonPackage struct {
	ImportPath      string
	Dir             string
	Name            string
	Export          string
	GoFiles         []string
	CompiledGoFiles []string
	CFiles          []string
	CgoFiles        []string
	CXXFiles        []string
	MFiles          []string
	HFiles          []string
	FFiles          []string
	SFiles          []string
	SwigFiles       []string
	SwigCXXFiles    []string
	SysoFiles       []string
	Imports         []string
	ImportMap       map[string]string
	Deps            []string
	Module          *Module
	TestGoFiles     []string
	TestImports     []string
	XTestGoFiles    []string
	XTestImports    []string
	ForTest         string // q in a "p [q.test]" package, else ""
	DepOnly         bool

	// Error holds the package's load error, if any, as reported by go list.
	Error *jsonPackageError
}
|
|
||||||
|
|
||||||
// jsonPackageError mirrors the "Error" object emitted by "go list -json"
// for an erroneous package.
type jsonPackageError struct {
	ImportStack []string // import chain leading to the erroneous package
	Pos         string   // error position, when go list reports one
	Err         string   // the error message text
}
|
|
||||||
|
|
||||||
func otherFiles(p *jsonPackage) [][]string {
|
|
||||||
return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
|
|
||||||
}
|
|
||||||
|
|
||||||
// createDriverResponse uses the "go list" command to expand the pattern
// words and return a response for the specified packages.
func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) {
	// go list uses the following identifiers in ImportPath and Imports:
	//
	// 	"p"			-- importable package or main (command)
	// 	"q.test"		-- q's test executable
	// 	"p [q.test]"		-- variant of p as built for q's test executable
	// 	"q_test [q.test]"	-- q's external test package
	//
	// The packages p that are built differently for a test q.test
	// are q itself, plus any helpers used by the external test q_test,
	// typically including "testing" and all its dependencies.

	// Run "go list" for complete
	// information on the specified packages.
	buf, err := state.invokeGo("list", golistargs(state.cfg, words)...)
	if err != nil {
		return nil, err
	}
	// seen deduplicates the raw go list records by import path;
	// pkgs collects the converted Packages keyed by ID;
	// additionalErrors accumulates errors that must be attached to an
	// importing package rather than to the package go list reported them on.
	seen := make(map[string]*jsonPackage)
	pkgs := make(map[string]*Package)
	additionalErrors := make(map[string][]Error)
	// Decode the JSON and convert it to Package form.
	var response driverResponse
	for dec := json.NewDecoder(buf); dec.More(); {
		p := new(jsonPackage)
		if err := dec.Decode(p); err != nil {
			return nil, fmt.Errorf("JSON decoding failed: %v", err)
		}

		if p.ImportPath == "" {
			// The documentation for go list says that “[e]rroneous packages will have
			// a non-empty ImportPath”. If for some reason it comes back empty, we
			// prefer to error out rather than silently discarding data or handing
			// back a package without any way to refer to it.
			if p.Error != nil {
				return nil, Error{
					Pos: p.Error.Pos,
					Msg: p.Error.Err,
				}
			}
			return nil, fmt.Errorf("package missing import path: %+v", p)
		}

		// Work around https://golang.org/issue/33157:
		// go list -e, when given an absolute path, will find the package contained at
		// that directory. But when no package exists there, it will return a fake package
		// with an error and the ImportPath set to the absolute path provided to go list.
		// Try to convert that absolute path to what its package path would be if it's
		// contained in a known module or GOPATH entry. This will allow the package to be
		// properly "reclaimed" when overlays are processed.
		if filepath.IsAbs(p.ImportPath) && p.Error != nil {
			pkgPath, ok, err := state.getPkgPath(p.ImportPath)
			if err != nil {
				return nil, err
			}
			if ok {
				p.ImportPath = pkgPath
			}
		}

		if old, found := seen[p.ImportPath]; found {
			// If one version of the package has an error, and the other doesn't, assume
			// that this is a case where go list is reporting a fake dependency variant
			// of the imported package: When a package tries to invalidly import another
			// package, go list emits a variant of the imported package (with the same
			// import path, but with an error on it, and the package will have a
			// DepError set on it). An example of when this can happen is for imports of
			// main packages: main packages can not be imported, but they may be
			// separately matched and listed by another pattern.
			// See golang.org/issue/36188 for more details.

			// The plan is that eventually, hopefully in Go 1.15, the error will be
			// reported on the importing package rather than the duplicate "fake"
			// version of the imported package. Once all supported versions of Go
			// have the new behavior this logic can be deleted.
			// TODO(matloob): delete the workaround logic once all supported versions of
			// Go return the errors on the proper package.

			// There should be exactly one version of a package that doesn't have an
			// error.
			if old.Error == nil && p.Error == nil {
				if !reflect.DeepEqual(p, old) {
					return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
				}
				continue
			}

			// Determine if this package's error needs to be bubbled up.
			// This is a hack, and we expect for go list to eventually set the error
			// on the package.
			if old.Error != nil {
				var errkind string
				if strings.Contains(old.Error.Err, "not an importable package") {
					errkind = "not an importable package"
				} else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") {
					errkind = "use of internal package not allowed"
				}
				if errkind != "" {
					if len(old.Error.ImportStack) < 1 {
						return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind)
					}
					importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1]
					if importingPkg == old.ImportPath {
						// Using an older version of Go which put this package itself on top of import
						// stack, instead of the importer. Look for importer in second from top
						// position.
						if len(old.Error.ImportStack) < 2 {
							return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind)
						}
						importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2]
					}
					additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{
						Pos:  old.Error.Pos,
						Msg:  old.Error.Err,
						Kind: ListError,
					})
				}
			}

			// Make sure that if there's a version of the package without an error,
			// that's the one reported to the user.
			if old.Error == nil {
				continue
			}

			// This package will replace the old one at the end of the loop.
		}
		seen[p.ImportPath] = p

		pkg := &Package{
			Name:            p.Name,
			ID:              p.ImportPath,
			GoFiles:         absJoin(p.Dir, p.GoFiles, p.CgoFiles),
			CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
			OtherFiles:      absJoin(p.Dir, otherFiles(p)...),
			forTest:         p.ForTest,
			Module:          p.Module,
		}

		if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 {
			if len(p.CompiledGoFiles) > len(p.GoFiles) {
				// We need the cgo definitions, which are in the first
				// CompiledGoFile after the non-cgo ones. This is a hack but there
				// isn't currently a better way to find it. We also need the pure
				// Go files and unprocessed cgo files, all of which are already
				// in pkg.GoFiles.
				cgoTypes := p.CompiledGoFiles[len(p.GoFiles)]
				pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...)
			} else {
				// golang/go#38990: go list silently fails to do cgo processing
				pkg.CompiledGoFiles = nil
				pkg.Errors = append(pkg.Errors, Error{
					Msg:  "go list failed to return CompiledGoFiles; https://golang.org/issue/38990?",
					Kind: ListError,
				})
			}
		}

		// Work around https://golang.org/issue/28749:
		// cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
		// Filter out any elements of CompiledGoFiles that are also in OtherFiles.
		// We have to keep this workaround in place until go1.12 is a distant memory.
		if len(pkg.OtherFiles) > 0 {
			other := make(map[string]bool, len(pkg.OtherFiles))
			for _, f := range pkg.OtherFiles {
				other[f] = true
			}

			out := pkg.CompiledGoFiles[:0]
			for _, f := range pkg.CompiledGoFiles {
				if other[f] {
					continue
				}
				out = append(out, f)
			}
			pkg.CompiledGoFiles = out
		}

		// Extract the PkgPath from the package's ID.
		if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
			pkg.PkgPath = pkg.ID[:i]
		} else {
			pkg.PkgPath = pkg.ID
		}

		if pkg.PkgPath == "unsafe" {
			pkg.GoFiles = nil // ignore fake unsafe.go file
		}

		// Assume go list emits only absolute paths for Dir.
		if p.Dir != "" && !filepath.IsAbs(p.Dir) {
			log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
		}

		if p.Export != "" && !filepath.IsAbs(p.Export) {
			pkg.ExportFile = filepath.Join(p.Dir, p.Export)
		} else {
			pkg.ExportFile = p.Export
		}

		// imports
		//
		// Imports contains the IDs of all imported packages.
		// ImportsMap records (path, ID) only where they differ.
		ids := make(map[string]bool)
		for _, id := range p.Imports {
			ids[id] = true
		}
		pkg.Imports = make(map[string]*Package)
		for path, id := range p.ImportMap {
			pkg.Imports[path] = &Package{ID: id} // non-identity import
			delete(ids, id)
		}
		for id := range ids {
			// "C" is a pseudo-import introduced by cgo, not a real package.
			if id == "C" {
				continue
			}

			pkg.Imports[id] = &Package{ID: id} // identity import
		}
		if !p.DepOnly {
			response.Roots = append(response.Roots, pkg.ID)
		}

		// Work around for pre-go.1.11 versions of go list.
		// TODO(matloob): they should be handled by the fallback.
		// Can we delete this?
		if len(pkg.CompiledGoFiles) == 0 {
			pkg.CompiledGoFiles = pkg.GoFiles
		}

		if p.Error != nil {
			msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
			// Address golang.org/issue/35964 by appending import stack to error message.
			if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
				msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
			}
			pkg.Errors = append(pkg.Errors, Error{
				Pos:  p.Error.Pos,
				Msg:  msg,
				Kind: ListError,
			})
		}

		pkgs[pkg.ID] = pkg
	}

	// Attach the bubbled-up errors collected above to their importing packages.
	for id, errs := range additionalErrors {
		if p, ok := pkgs[id]; ok {
			p.Errors = append(p.Errors, errs...)
		}
	}
	for _, pkg := range pkgs {
		response.Packages = append(response.Packages, pkg)
	}
	// Sort for deterministic output (map iteration order is random).
	sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })

	return &response, nil
}
|
|
||||||
|
|
||||||
// getPkgPath finds the package path of a directory if it's relative to a root directory.
|
|
||||||
func (state *golistState) getPkgPath(dir string) (string, bool, error) {
|
|
||||||
absDir, err := filepath.Abs(dir)
|
|
||||||
if err != nil {
|
|
||||||
return "", false, err
|
|
||||||
}
|
|
||||||
roots, err := state.determineRootDirs()
|
|
||||||
if err != nil {
|
|
||||||
return "", false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
for rdir, rpath := range roots {
|
|
||||||
// Make sure that the directory is in the module,
|
|
||||||
// to avoid creating a path relative to another module.
|
|
||||||
if !strings.HasPrefix(absDir, rdir) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// TODO(matloob): This doesn't properly handle symlinks.
|
|
||||||
r, err := filepath.Rel(rdir, dir)
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if rpath != "" {
|
|
||||||
// We choose only one root even though the directory even it can belong in multiple modules
|
|
||||||
// or GOPATH entries. This is okay because we only need to work with absolute dirs when a
|
|
||||||
// file is missing from disk, for instance when gopls calls go/packages in an overlay.
|
|
||||||
// Once the file is saved, gopls, or the next invocation of the tool will get the correct
|
|
||||||
// result straight from golist.
|
|
||||||
// TODO(matloob): Implement module tiebreaking?
|
|
||||||
return path.Join(rpath, filepath.ToSlash(r)), true, nil
|
|
||||||
}
|
|
||||||
return filepath.ToSlash(r), true, nil
|
|
||||||
}
|
|
||||||
return "", false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// absJoin absolutizes and flattens the lists of files.
|
|
||||||
func absJoin(dir string, fileses ...[]string) (res []string) {
|
|
||||||
for _, files := range fileses {
|
|
||||||
for _, file := range files {
|
|
||||||
if !filepath.IsAbs(file) {
|
|
||||||
file = filepath.Join(dir, file)
|
|
||||||
}
|
|
||||||
res = append(res, file)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
func golistargs(cfg *Config, words []string) []string {
|
|
||||||
const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
|
|
||||||
fullargs := []string{
|
|
||||||
"-e", "-json",
|
|
||||||
fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
|
|
||||||
fmt.Sprintf("-test=%t", cfg.Tests),
|
|
||||||
fmt.Sprintf("-export=%t", usesExportData(cfg)),
|
|
||||||
fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
|
|
||||||
// go list doesn't let you pass -test and -find together,
|
|
||||||
// probably because you'd just get the TestMain.
|
|
||||||
fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
|
|
||||||
}
|
|
||||||
fullargs = append(fullargs, cfg.BuildFlags...)
|
|
||||||
fullargs = append(fullargs, "--")
|
|
||||||
fullargs = append(fullargs, words...)
|
|
||||||
return fullargs
|
|
||||||
}
|
|
||||||
|
|
||||||
// invokeGo returns the stdout of a go command invocation.
// On failure it applies a long series of workarounds that translate known
// go list error modes (reported only on stderr) into synthetic JSON package
// records on stdout, so that callers see an erroneous package instead of a
// hard failure.
func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
	cfg := state.cfg

	inv := gocommand.Invocation{
		Verb:       verb,
		Args:       args,
		BuildFlags: cfg.BuildFlags,
		Env:        cfg.Env,
		Logf:       cfg.Logf,
		WorkingDir: cfg.Dir,
	}
	gocmdRunner := cfg.gocmdRunner
	if gocmdRunner == nil {
		gocmdRunner = &gocommand.Runner{}
	}
	stdout, stderr, _, err := gocmdRunner.RunRaw(cfg.Context, inv)
	if err != nil {
		// Check for 'go' executable not being found.
		if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
			return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
		}

		exitErr, ok := err.(*exec.ExitError)
		if !ok {
			// Catastrophic error:
			// - context cancellation
			return nil, xerrors.Errorf("couldn't run 'go': %w", err)
		}

		// Old go version?
		if strings.Contains(stderr.String(), "flag provided but not defined") {
			return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
		}

		// Related to #24854
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") {
			return nil, fmt.Errorf("%s", stderr.String())
		}

		// Is there an error running the C compiler in cgo? This will be reported in the "Error" field
		// and should be suppressed by go list -e.
		//
		// This condition is not perfect yet because the error message can include other error messages than runtime/cgo.
		isPkgPathRune := func(r rune) bool {
			// From https://golang.org/ref/spec#Import_declarations:
			//    Implementation restriction: A compiler may restrict ImportPaths to non-empty strings
			//    using only characters belonging to Unicode's L, M, N, P, and S general categories
			//    (the Graphic characters without spaces) and may also exclude the
			//    characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD.
			return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) &&
				!strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r)
		}
		// A stderr of the form "# <pkgpath>\n..." is compiler noise for a
		// package go list already reported on stdout; pass stdout through.
		if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
			msg := stderr.String()[len("# "):]
			if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") {
				return stdout, nil
			}
			// Treat pkg-config errors as a special case (golang.org/issue/36770).
			if strings.HasPrefix(msg, "pkg-config") {
				return stdout, nil
			}
		}

		// This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
		// the error in the Err section of stdout in case -e option is provided.
		// This fix is provided for backwards compatibility.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Similar to the previous error, but currently lacks a fix in Go.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
		// If the package doesn't exist, put the absolute path of the directory into the error message,
		// as Go 1.13 list does.
		const noSuchDirectory = "no such directory"
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
			errstr := stderr.String()
			abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				abspath, strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
		// Note that the error message we look for in this case is different that the one looked for above.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
			output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
		// directory outside any module.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				// TODO(matloob): command-line-arguments isn't correct here.
				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Another variation of the previous error
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				// TODO(matloob): command-line-arguments isn't correct here.
				"command-line-arguments", strings.Trim(stderr.String(), "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
		// status if there's a dependency on a package that doesn't exist. But it should return
		// a zero exit status and set an error on that package.
		if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
			// Don't clobber stdout if `go list` actually returned something.
			if len(stdout.String()) > 0 {
				return stdout, nil
			}
			// try to extract package name from string
			stderrStr := stderr.String()
			var importPath string
			colon := strings.Index(stderrStr, ":")
			if colon > 0 && strings.HasPrefix(stderrStr, "go build ") {
				importPath = stderrStr[len("go build "):colon]
			}
			output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
				importPath, strings.Trim(stderrStr, "\n"))
			return bytes.NewBufferString(output), nil
		}

		// Export mode entails a build.
		// If that build fails, errors appear on stderr
		// (despite the -e flag) and the Export field is blank.
		// Do not fail in that case.
		// The same is true if an ad-hoc package given to go list doesn't exist.
		// TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
		// packages don't exist or a build fails.
		if !usesExportData(cfg) && !containsGoFile(args) {
			return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
		}
	}
	return stdout, nil
}
|
|
||||||
|
|
||||||
// containsGoFile reports whether any element of s names a .go file.
func containsGoFile(s []string) bool {
	for _, arg := range s {
		if !strings.HasSuffix(arg, ".go") {
			continue
		}
		return true
	}
	return false
}
|
|
||||||
|
|
||||||
func cmdDebugStr(cmd *exec.Cmd, args ...string) string {
|
|
||||||
env := make(map[string]string)
|
|
||||||
for _, kv := range cmd.Env {
|
|
||||||
split := strings.Split(kv, "=")
|
|
||||||
k, v := split[0], split[1]
|
|
||||||
env[k] = v
|
|
||||||
}
|
|
||||||
var quotedArgs []string
|
|
||||||
for _, arg := range args {
|
|
||||||
quotedArgs = append(quotedArgs, strconv.Quote(arg))
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " "))
|
|
||||||
}
|
|
465
vendor/golang.org/x/tools/go/packages/golist_overlay.go
generated
vendored
465
vendor/golang.org/x/tools/go/packages/golist_overlay.go
generated
vendored
|
@ -1,465 +0,0 @@
|
||||||
package packages
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"go/parser"
|
|
||||||
"go/token"
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// processGolistOverlay provides rudimentary support for adding
|
|
||||||
// files that don't exist on disk to an overlay. The results can be
|
|
||||||
// sometimes incorrect.
|
|
||||||
// TODO(matloob): Handle unsupported cases, including the following:
|
|
||||||
// - determining the correct package to add given a new import path
|
|
||||||
func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) {
|
|
||||||
havePkgs := make(map[string]string) // importPath -> non-test package ID
|
|
||||||
needPkgsSet := make(map[string]bool)
|
|
||||||
modifiedPkgsSet := make(map[string]bool)
|
|
||||||
|
|
||||||
pkgOfDir := make(map[string][]*Package)
|
|
||||||
for _, pkg := range response.dr.Packages {
|
|
||||||
// This is an approximation of import path to id. This can be
|
|
||||||
// wrong for tests, vendored packages, and a number of other cases.
|
|
||||||
havePkgs[pkg.PkgPath] = pkg.ID
|
|
||||||
x := commonDir(pkg.GoFiles)
|
|
||||||
if x != "" {
|
|
||||||
pkgOfDir[x] = append(pkgOfDir[x], pkg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If no new imports are added, it is safe to avoid loading any needPkgs.
|
|
||||||
// Otherwise, it's hard to tell which package is actually being loaded
|
|
||||||
// (due to vendoring) and whether any modified package will show up
|
|
||||||
// in the transitive set of dependencies (because new imports are added,
|
|
||||||
// potentially modifying the transitive set of dependencies).
|
|
||||||
var overlayAddsImports bool
|
|
||||||
|
|
||||||
// If both a package and its test package are created by the overlay, we
|
|
||||||
// need the real package first. Process all non-test files before test
|
|
||||||
// files, and make the whole process deterministic while we're at it.
|
|
||||||
var overlayFiles []string
|
|
||||||
for opath := range state.cfg.Overlay {
|
|
||||||
overlayFiles = append(overlayFiles, opath)
|
|
||||||
}
|
|
||||||
sort.Slice(overlayFiles, func(i, j int) bool {
|
|
||||||
iTest := strings.HasSuffix(overlayFiles[i], "_test.go")
|
|
||||||
jTest := strings.HasSuffix(overlayFiles[j], "_test.go")
|
|
||||||
if iTest != jTest {
|
|
||||||
return !iTest // non-tests are before tests.
|
|
||||||
}
|
|
||||||
return overlayFiles[i] < overlayFiles[j]
|
|
||||||
})
|
|
||||||
for _, opath := range overlayFiles {
|
|
||||||
contents := state.cfg.Overlay[opath]
|
|
||||||
base := filepath.Base(opath)
|
|
||||||
dir := filepath.Dir(opath)
|
|
||||||
var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant
|
|
||||||
var testVariantOf *Package // if opath is a test file, this is the package it is testing
|
|
||||||
var fileExists bool
|
|
||||||
isTestFile := strings.HasSuffix(opath, "_test.go")
|
|
||||||
pkgName, ok := extractPackageName(opath, contents)
|
|
||||||
if !ok {
|
|
||||||
// Don't bother adding a file that doesn't even have a parsable package statement
|
|
||||||
// to the overlay.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// If all the overlay files belong to a different package, change the
|
|
||||||
// package name to that package.
|
|
||||||
maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir])
|
|
||||||
nextPackage:
|
|
||||||
for _, p := range response.dr.Packages {
|
|
||||||
if pkgName != p.Name && p.ID != "command-line-arguments" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, f := range p.GoFiles {
|
|
||||||
if !sameFile(filepath.Dir(f), dir) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
// Make sure to capture information on the package's test variant, if needed.
|
|
||||||
if isTestFile && !hasTestFiles(p) {
|
|
||||||
// TODO(matloob): Are there packages other than the 'production' variant
|
|
||||||
// of a package that this can match? This shouldn't match the test main package
|
|
||||||
// because the file is generated in another directory.
|
|
||||||
testVariantOf = p
|
|
||||||
continue nextPackage
|
|
||||||
}
|
|
||||||
// We must have already seen the package of which this is a test variant.
|
|
||||||
if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
|
|
||||||
if hasTestFiles(p) {
|
|
||||||
testVariantOf = pkg
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pkg = p
|
|
||||||
if filepath.Base(f) == base {
|
|
||||||
fileExists = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// The overlay could have included an entirely new package.
|
|
||||||
if pkg == nil {
|
|
||||||
// Try to find the module or gopath dir the file is contained in.
|
|
||||||
// Then for modules, add the module opath to the beginning.
|
|
||||||
pkgPath, ok, err := state.getPkgPath(dir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if !ok {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
var forTest string // only set for x tests
|
|
||||||
isXTest := strings.HasSuffix(pkgName, "_test")
|
|
||||||
if isXTest {
|
|
||||||
forTest = pkgPath
|
|
||||||
pkgPath += "_test"
|
|
||||||
}
|
|
||||||
id := pkgPath
|
|
||||||
if isTestFile {
|
|
||||||
if isXTest {
|
|
||||||
id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest)
|
|
||||||
} else {
|
|
||||||
id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Try to reclaim a package with the same ID, if it exists in the response.
|
|
||||||
for _, p := range response.dr.Packages {
|
|
||||||
if reclaimPackage(p, id, opath, contents) {
|
|
||||||
pkg = p
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Otherwise, create a new package.
|
|
||||||
if pkg == nil {
|
|
||||||
pkg = &Package{
|
|
||||||
PkgPath: pkgPath,
|
|
||||||
ID: id,
|
|
||||||
Name: pkgName,
|
|
||||||
Imports: make(map[string]*Package),
|
|
||||||
}
|
|
||||||
response.addPackage(pkg)
|
|
||||||
havePkgs[pkg.PkgPath] = id
|
|
||||||
// Add the production package's sources for a test variant.
|
|
||||||
if isTestFile && !isXTest && testVariantOf != nil {
|
|
||||||
pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
|
|
||||||
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
|
|
||||||
// Add the package under test and its imports to the test variant.
|
|
||||||
pkg.forTest = testVariantOf.PkgPath
|
|
||||||
for k, v := range testVariantOf.Imports {
|
|
||||||
pkg.Imports[k] = &Package{ID: v.ID}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if isXTest {
|
|
||||||
pkg.forTest = forTest
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !fileExists {
|
|
||||||
pkg.GoFiles = append(pkg.GoFiles, opath)
|
|
||||||
// TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior
|
|
||||||
// if the file will be ignored due to its build tags.
|
|
||||||
pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath)
|
|
||||||
modifiedPkgsSet[pkg.ID] = true
|
|
||||||
}
|
|
||||||
imports, err := extractImports(opath, contents)
|
|
||||||
if err != nil {
|
|
||||||
// Let the parser or type checker report errors later.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
for _, imp := range imports {
|
|
||||||
// TODO(rstambler): If the package is an x test and the import has
|
|
||||||
// a test variant, make sure to replace it.
|
|
||||||
if _, found := pkg.Imports[imp]; found {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
overlayAddsImports = true
|
|
||||||
id, ok := havePkgs[imp]
|
|
||||||
if !ok {
|
|
||||||
var err error
|
|
||||||
id, err = state.resolveImport(dir, imp)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
pkg.Imports[imp] = &Package{ID: id}
|
|
||||||
// Add dependencies to the non-test variant version of this package as well.
|
|
||||||
if testVariantOf != nil {
|
|
||||||
testVariantOf.Imports[imp] = &Package{ID: id}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// toPkgPath guesses the package path given the id.
|
|
||||||
toPkgPath := func(sourceDir, id string) (string, error) {
|
|
||||||
if i := strings.IndexByte(id, ' '); i >= 0 {
|
|
||||||
return state.resolveImport(sourceDir, id[:i])
|
|
||||||
}
|
|
||||||
return state.resolveImport(sourceDir, id)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Now that new packages have been created, do another pass to determine
|
|
||||||
// the new set of missing packages.
|
|
||||||
for _, pkg := range response.dr.Packages {
|
|
||||||
for _, imp := range pkg.Imports {
|
|
||||||
if len(pkg.GoFiles) == 0 {
|
|
||||||
return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath)
|
|
||||||
}
|
|
||||||
pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if _, ok := havePkgs[pkgPath]; !ok {
|
|
||||||
needPkgsSet[pkgPath] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if overlayAddsImports {
|
|
||||||
needPkgs = make([]string, 0, len(needPkgsSet))
|
|
||||||
for pkg := range needPkgsSet {
|
|
||||||
needPkgs = append(needPkgs, pkg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
|
|
||||||
for pkg := range modifiedPkgsSet {
|
|
||||||
modifiedPkgs = append(modifiedPkgs, pkg)
|
|
||||||
}
|
|
||||||
return modifiedPkgs, needPkgs, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// resolveImport finds the the ID of a package given its import path.
|
|
||||||
// In particular, it will find the right vendored copy when in GOPATH mode.
|
|
||||||
func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) {
|
|
||||||
env, err := state.getEnv()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if env["GOMOD"] != "" {
|
|
||||||
return importPath, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
searchDir := sourceDir
|
|
||||||
for {
|
|
||||||
vendorDir := filepath.Join(searchDir, "vendor")
|
|
||||||
exists, ok := state.vendorDirs[vendorDir]
|
|
||||||
if !ok {
|
|
||||||
info, err := os.Stat(vendorDir)
|
|
||||||
exists = err == nil && info.IsDir()
|
|
||||||
state.vendorDirs[vendorDir] = exists
|
|
||||||
}
|
|
||||||
|
|
||||||
if exists {
|
|
||||||
vendoredPath := filepath.Join(vendorDir, importPath)
|
|
||||||
if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() {
|
|
||||||
// We should probably check for .go files here, but shame on anyone who fools us.
|
|
||||||
path, ok, err := state.getPkgPath(vendoredPath)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if ok {
|
|
||||||
return path, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We know we've hit the top of the filesystem when we Dir / and get /,
|
|
||||||
// or C:\ and get C:\, etc.
|
|
||||||
next := filepath.Dir(searchDir)
|
|
||||||
if next == searchDir {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
searchDir = next
|
|
||||||
}
|
|
||||||
return importPath, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasTestFiles(p *Package) bool {
|
|
||||||
for _, f := range p.GoFiles {
|
|
||||||
if strings.HasSuffix(f, "_test.go") {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// determineRootDirs returns a mapping from absolute directories that could
|
|
||||||
// contain code to their corresponding import path prefixes.
|
|
||||||
func (state *golistState) determineRootDirs() (map[string]string, error) {
|
|
||||||
env, err := state.getEnv()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if env["GOMOD"] != "" {
|
|
||||||
state.rootsOnce.Do(func() {
|
|
||||||
state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
|
|
||||||
})
|
|
||||||
} else {
|
|
||||||
state.rootsOnce.Do(func() {
|
|
||||||
state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
|
|
||||||
})
|
|
||||||
}
|
|
||||||
return state.rootDirs, state.rootDirsError
|
|
||||||
}
|
|
||||||
|
|
||||||
func (state *golistState) determineRootDirsModules() (map[string]string, error) {
|
|
||||||
// This will only return the root directory for the main module.
|
|
||||||
// For now we only support overlays in main modules.
|
|
||||||
// Editing files in the module cache isn't a great idea, so we don't
|
|
||||||
// plan to ever support that, but editing files in replaced modules
|
|
||||||
// is something we may want to support. To do that, we'll want to
|
|
||||||
// do a go list -m to determine the replaced module's module path and
|
|
||||||
// directory, and then a go list -m {{with .Replace}}{{.Dir}}{{end}} <replaced module's path>
|
|
||||||
// from the main module to determine if that module is actually a replacement.
|
|
||||||
// See bcmills's comment here: https://github.com/golang/go/issues/37629#issuecomment-594179751
|
|
||||||
// for more information.
|
|
||||||
out, err := state.invokeGo("list", "-m", "-json")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
m := map[string]string{}
|
|
||||||
type jsonMod struct{ Path, Dir string }
|
|
||||||
for dec := json.NewDecoder(out); dec.More(); {
|
|
||||||
mod := new(jsonMod)
|
|
||||||
if err := dec.Decode(mod); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if mod.Dir != "" && mod.Path != "" {
|
|
||||||
// This is a valid module; add it to the map.
|
|
||||||
absDir, err := filepath.Abs(mod.Dir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
m[absDir] = mod.Path
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
|
|
||||||
m := map[string]string{}
|
|
||||||
for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
|
|
||||||
absDir, err := filepath.Abs(dir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
m[filepath.Join(absDir, "src")] = ""
|
|
||||||
}
|
|
||||||
return m, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func extractImports(filename string, contents []byte) ([]string, error) {
|
|
||||||
f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var res []string
|
|
||||||
for _, imp := range f.Imports {
|
|
||||||
quotedPath := imp.Path.Value
|
|
||||||
path, err := strconv.Unquote(quotedPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
res = append(res, path)
|
|
||||||
}
|
|
||||||
return res, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// reclaimPackage attempts to reuse a package that failed to load in an overlay.
|
|
||||||
//
|
|
||||||
// If the package has errors and has no Name, GoFiles, or Imports,
|
|
||||||
// then it's possible that it doesn't yet exist on disk.
|
|
||||||
func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool {
|
|
||||||
// TODO(rstambler): Check the message of the actual error?
|
|
||||||
// It differs between $GOPATH and module mode.
|
|
||||||
if pkg.ID != id {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if len(pkg.Errors) != 1 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if pkg.Name != "" || pkg.ExportFile != "" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if len(pkg.Imports) > 0 {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
pkgName, ok := extractPackageName(filename, contents)
|
|
||||||
if !ok {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
pkg.Name = pkgName
|
|
||||||
pkg.Errors = nil
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func extractPackageName(filename string, contents []byte) (string, bool) {
|
|
||||||
// TODO(rstambler): Check the message of the actual error?
|
|
||||||
// It differs between $GOPATH and module mode.
|
|
||||||
f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset?
|
|
||||||
if err != nil {
|
|
||||||
return "", false
|
|
||||||
}
|
|
||||||
return f.Name.Name, true
|
|
||||||
}
|
|
||||||
|
|
||||||
func commonDir(a []string) string {
|
|
||||||
seen := make(map[string]bool)
|
|
||||||
x := append([]string{}, a...)
|
|
||||||
for _, f := range x {
|
|
||||||
seen[filepath.Dir(f)] = true
|
|
||||||
}
|
|
||||||
if len(seen) > 1 {
|
|
||||||
log.Fatalf("commonDir saw %v for %v", seen, x)
|
|
||||||
}
|
|
||||||
for k := range seen {
|
|
||||||
// len(seen) == 1
|
|
||||||
return k
|
|
||||||
}
|
|
||||||
return "" // no files
|
|
||||||
}
|
|
||||||
|
|
||||||
// It is possible that the files in the disk directory dir have a different package
|
|
||||||
// name from newName, which is deduced from the overlays. If they all have a different
|
|
||||||
// package name, and they all have the same package name, then that name becomes
|
|
||||||
// the package name.
|
|
||||||
// It returns true if it changes the package name, false otherwise.
|
|
||||||
func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) {
|
|
||||||
names := make(map[string]int)
|
|
||||||
for _, p := range pkgsOfDir {
|
|
||||||
names[p.Name]++
|
|
||||||
}
|
|
||||||
if len(names) != 1 {
|
|
||||||
// some files are in different packages
|
|
||||||
return
|
|
||||||
}
|
|
||||||
var oldName string
|
|
||||||
for k := range names {
|
|
||||||
oldName = k
|
|
||||||
}
|
|
||||||
if newName == oldName {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// We might have a case where all of the package names in the directory are
|
|
||||||
// the same, but the overlay file is for an x test, which belongs to its
|
|
||||||
// own package. If the x test does not yet exist on disk, we may not yet
|
|
||||||
// have its package name on disk, but we should not rename the packages.
|
|
||||||
//
|
|
||||||
// We use a heuristic to determine if this file belongs to an x test:
|
|
||||||
// The test file should have a package name whose package name has a _test
|
|
||||||
// suffix or looks like "newName_test".
|
|
||||||
maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test")
|
|
||||||
if isTestFile && maybeXTest {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, p := range pkgsOfDir {
|
|
||||||
p.Name = newName
|
|
||||||
}
|
|
||||||
}
|
|
57
vendor/golang.org/x/tools/go/packages/loadmode_string.go
generated
vendored
57
vendor/golang.org/x/tools/go/packages/loadmode_string.go
generated
vendored
|
@ -1,57 +0,0 @@
|
||||||
// Copyright 2019 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package packages
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var allModes = []LoadMode{
|
|
||||||
NeedName,
|
|
||||||
NeedFiles,
|
|
||||||
NeedCompiledGoFiles,
|
|
||||||
NeedImports,
|
|
||||||
NeedDeps,
|
|
||||||
NeedExportsFile,
|
|
||||||
NeedTypes,
|
|
||||||
NeedSyntax,
|
|
||||||
NeedTypesInfo,
|
|
||||||
NeedTypesSizes,
|
|
||||||
}
|
|
||||||
|
|
||||||
var modeStrings = []string{
|
|
||||||
"NeedName",
|
|
||||||
"NeedFiles",
|
|
||||||
"NeedCompiledGoFiles",
|
|
||||||
"NeedImports",
|
|
||||||
"NeedDeps",
|
|
||||||
"NeedExportsFile",
|
|
||||||
"NeedTypes",
|
|
||||||
"NeedSyntax",
|
|
||||||
"NeedTypesInfo",
|
|
||||||
"NeedTypesSizes",
|
|
||||||
}
|
|
||||||
|
|
||||||
func (mod LoadMode) String() string {
|
|
||||||
m := mod
|
|
||||||
if m == 0 {
|
|
||||||
return "LoadMode(0)"
|
|
||||||
}
|
|
||||||
var out []string
|
|
||||||
for i, x := range allModes {
|
|
||||||
if x > m {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if (m & x) != 0 {
|
|
||||||
out = append(out, modeStrings[i])
|
|
||||||
m = m ^ x
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if m != 0 {
|
|
||||||
out = append(out, "Unknown")
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
|
|
||||||
}
|
|
1212
vendor/golang.org/x/tools/go/packages/packages.go
generated
vendored
1212
vendor/golang.org/x/tools/go/packages/packages.go
generated
vendored
File diff suppressed because it is too large
Load diff
55
vendor/golang.org/x/tools/go/packages/visit.go
generated
vendored
55
vendor/golang.org/x/tools/go/packages/visit.go
generated
vendored
|
@ -1,55 +0,0 @@
|
||||||
package packages
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Visit visits all the packages in the import graph whose roots are
|
|
||||||
// pkgs, calling the optional pre function the first time each package
|
|
||||||
// is encountered (preorder), and the optional post function after a
|
|
||||||
// package's dependencies have been visited (postorder).
|
|
||||||
// The boolean result of pre(pkg) determines whether
|
|
||||||
// the imports of package pkg are visited.
|
|
||||||
func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
|
|
||||||
seen := make(map[*Package]bool)
|
|
||||||
var visit func(*Package)
|
|
||||||
visit = func(pkg *Package) {
|
|
||||||
if !seen[pkg] {
|
|
||||||
seen[pkg] = true
|
|
||||||
|
|
||||||
if pre == nil || pre(pkg) {
|
|
||||||
paths := make([]string, 0, len(pkg.Imports))
|
|
||||||
for path := range pkg.Imports {
|
|
||||||
paths = append(paths, path)
|
|
||||||
}
|
|
||||||
sort.Strings(paths) // Imports is a map, this makes visit stable
|
|
||||||
for _, path := range paths {
|
|
||||||
visit(pkg.Imports[path])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if post != nil {
|
|
||||||
post(pkg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, pkg := range pkgs {
|
|
||||||
visit(pkg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrintErrors prints to os.Stderr the accumulated errors of all
|
|
||||||
// packages in the import graph rooted at pkgs, dependencies first.
|
|
||||||
// PrintErrors returns the number of errors printed.
|
|
||||||
func PrintErrors(pkgs []*Package) int {
|
|
||||||
var n int
|
|
||||||
Visit(pkgs, nil, func(pkg *Package) {
|
|
||||||
for _, err := range pkg.Errors {
|
|
||||||
fmt.Fprintln(os.Stderr, err)
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return n
|
|
||||||
}
|
|
524
vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
generated
vendored
524
vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
generated
vendored
|
@ -1,524 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package objectpath defines a naming scheme for types.Objects
|
|
||||||
// (that is, named entities in Go programs) relative to their enclosing
|
|
||||||
// package.
|
|
||||||
//
|
|
||||||
// Type-checker objects are canonical, so they are usually identified by
|
|
||||||
// their address in memory (a pointer), but a pointer has meaning only
|
|
||||||
// within one address space. By contrast, objectpath names allow the
|
|
||||||
// identity of an object to be sent from one program to another,
|
|
||||||
// establishing a correspondence between types.Object variables that are
|
|
||||||
// distinct but logically equivalent.
|
|
||||||
//
|
|
||||||
// A single object may have multiple paths. In this example,
|
|
||||||
// type A struct{ X int }
|
|
||||||
// type B A
|
|
||||||
// the field X has two paths due to its membership of both A and B.
|
|
||||||
// The For(obj) function always returns one of these paths, arbitrarily
|
|
||||||
// but consistently.
|
|
||||||
package objectpath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"go/types"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A Path is an opaque name that identifies a types.Object
|
|
||||||
// relative to its package. Conceptually, the name consists of a
|
|
||||||
// sequence of destructuring operations applied to the package scope
|
|
||||||
// to obtain the original object.
|
|
||||||
// The name does not include the package itself.
|
|
||||||
type Path string
|
|
||||||
|
|
||||||
// Encoding
|
|
||||||
//
|
|
||||||
// An object path is a textual and (with training) human-readable encoding
|
|
||||||
// of a sequence of destructuring operators, starting from a types.Package.
|
|
||||||
// The sequences represent a path through the package/object/type graph.
|
|
||||||
// We classify these operators by their type:
|
|
||||||
//
|
|
||||||
// PO package->object Package.Scope.Lookup
|
|
||||||
// OT object->type Object.Type
|
|
||||||
// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
|
|
||||||
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
|
|
||||||
//
|
|
||||||
// All valid paths start with a package and end at an object
|
|
||||||
// and thus may be defined by the regular language:
|
|
||||||
//
|
|
||||||
// objectpath = PO (OT TT* TO)*
|
|
||||||
//
|
|
||||||
// The concrete encoding follows directly:
|
|
||||||
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
|
|
||||||
// - The only OT operator is Object.Type,
|
|
||||||
// which we encode as '.' because dot cannot appear in an identifier.
|
|
||||||
// - The TT operators are encoded as [EKPRU].
|
|
||||||
// - The OT operators are encoded as [AFMO];
|
|
||||||
// three of these (At,Field,Method) require an integer operand,
|
|
||||||
// which is encoded as a string of decimal digits.
|
|
||||||
// These indices are stable across different representations
|
|
||||||
// of the same package, even source and export data.
|
|
||||||
//
|
|
||||||
// In the example below,
|
|
||||||
//
|
|
||||||
// package p
|
|
||||||
//
|
|
||||||
// type T interface {
|
|
||||||
// f() (a string, b struct{ X int })
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// field X has the path "T.UM0.RA1.F0",
|
|
||||||
// representing the following sequence of operations:
|
|
||||||
//
|
|
||||||
// p.Lookup("T") T
|
|
||||||
// .Type().Underlying().Method(0). f
|
|
||||||
// .Type().Results().At(1) b
|
|
||||||
// .Type().Field(0) X
|
|
||||||
//
|
|
||||||
// The encoding is not maximally compact---every R or P is
|
|
||||||
// followed by an A, for example---but this simplifies the
|
|
||||||
// encoder and decoder.
|
|
||||||
//
|
|
||||||
const (
|
|
||||||
// object->type operators
|
|
||||||
opType = '.' // .Type() (Object)
|
|
||||||
|
|
||||||
// type->type operators
|
|
||||||
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
|
|
||||||
opKey = 'K' // .Key() (Map)
|
|
||||||
opParams = 'P' // .Params() (Signature)
|
|
||||||
opResults = 'R' // .Results() (Signature)
|
|
||||||
opUnderlying = 'U' // .Underlying() (Named)
|
|
||||||
|
|
||||||
// type->object operators
|
|
||||||
opAt = 'A' // .At(i) (Tuple)
|
|
||||||
opField = 'F' // .Field(i) (Struct)
|
|
||||||
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
|
|
||||||
opObj = 'O' // .Obj() (Named)
|
|
||||||
)
|
|
||||||
|
|
||||||
// The For function returns the path to an object relative to its package,
|
|
||||||
// or an error if the object is not accessible from the package's Scope.
|
|
||||||
//
|
|
||||||
// The For function guarantees to return a path only for the following objects:
|
|
||||||
// - package-level types
|
|
||||||
// - exported package-level non-types
|
|
||||||
// - methods
|
|
||||||
// - parameter and result variables
|
|
||||||
// - struct fields
|
|
||||||
// These objects are sufficient to define the API of their package.
|
|
||||||
// The objects described by a package's export data are drawn from this set.
|
|
||||||
//
|
|
||||||
// For does not return a path for predeclared names, imported package
|
|
||||||
// names, local names, and unexported package-level names (except
|
|
||||||
// types).
|
|
||||||
//
|
|
||||||
// Example: given this definition,
|
|
||||||
//
|
|
||||||
// package p
|
|
||||||
//
|
|
||||||
// type T interface {
|
|
||||||
// f() (a string, b struct{ X int })
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// For(X) would return a path that denotes the following sequence of operations:
|
|
||||||
//
|
|
||||||
// p.Scope().Lookup("T") (TypeName T)
|
|
||||||
// .Type().Underlying().Method(0). (method Func f)
|
|
||||||
// .Type().Results().At(1) (field Var b)
|
|
||||||
// .Type().Field(0) (field Var X)
|
|
||||||
//
|
|
||||||
// where p is the package (*types.Package) to which X belongs.
|
|
||||||
func For(obj types.Object) (Path, error) {
|
|
||||||
pkg := obj.Pkg()
|
|
||||||
|
|
||||||
// This table lists the cases of interest.
|
|
||||||
//
|
|
||||||
// Object Action
|
|
||||||
// ------ ------
|
|
||||||
// nil reject
|
|
||||||
// builtin reject
|
|
||||||
// pkgname reject
|
|
||||||
// label reject
|
|
||||||
// var
|
|
||||||
// package-level accept
|
|
||||||
// func param/result accept
|
|
||||||
// local reject
|
|
||||||
// struct field accept
|
|
||||||
// const
|
|
||||||
// package-level accept
|
|
||||||
// local reject
|
|
||||||
// func
|
|
||||||
// package-level accept
|
|
||||||
// init functions reject
|
|
||||||
// concrete method accept
|
|
||||||
// interface method accept
|
|
||||||
// type
|
|
||||||
// package-level accept
|
|
||||||
// local reject
|
|
||||||
//
|
|
||||||
// The only accessible package-level objects are members of pkg itself.
|
|
||||||
//
|
|
||||||
// The cases are handled in four steps:
|
|
||||||
//
|
|
||||||
// 1. reject nil and builtin
|
|
||||||
// 2. accept package-level objects
|
|
||||||
// 3. reject obviously invalid objects
|
|
||||||
// 4. search the API for the path to the param/result/field/method.
|
|
||||||
|
|
||||||
// 1. reference to nil or builtin?
|
|
||||||
if pkg == nil {
|
|
||||||
return "", fmt.Errorf("predeclared %s has no path", obj)
|
|
||||||
}
|
|
||||||
scope := pkg.Scope()
|
|
||||||
|
|
||||||
// 2. package-level object?
|
|
||||||
if scope.Lookup(obj.Name()) == obj {
|
|
||||||
// Only exported objects (and non-exported types) have a path.
|
|
||||||
// Non-exported types may be referenced by other objects.
|
|
||||||
if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
|
|
||||||
return "", fmt.Errorf("no path for non-exported %v", obj)
|
|
||||||
}
|
|
||||||
return Path(obj.Name()), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// 3. Not a package-level object.
|
|
||||||
// Reject obviously non-viable cases.
|
|
||||||
switch obj := obj.(type) {
|
|
||||||
case *types.Const, // Only package-level constants have a path.
|
|
||||||
*types.TypeName, // Only package-level types have a path.
|
|
||||||
*types.Label, // Labels are function-local.
|
|
||||||
*types.PkgName: // PkgNames are file-local.
|
|
||||||
return "", fmt.Errorf("no path for %v", obj)
|
|
||||||
|
|
||||||
case *types.Var:
|
|
||||||
// Could be:
|
|
||||||
// - a field (obj.IsField())
|
|
||||||
// - a func parameter or result
|
|
||||||
// - a local var.
|
|
||||||
// Sadly there is no way to distinguish
|
|
||||||
// a param/result from a local
|
|
||||||
// so we must proceed to the find.
|
|
||||||
|
|
||||||
case *types.Func:
|
|
||||||
// A func, if not package-level, must be a method.
|
|
||||||
if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
|
|
||||||
return "", fmt.Errorf("func is not a method: %v", obj)
|
|
||||||
}
|
|
||||||
// TODO(adonovan): opt: if the method is concrete,
|
|
||||||
// do a specialized version of the rest of this function so
|
|
||||||
// that it's O(1) not O(|scope|). Basically 'find' is needed
|
|
||||||
// only for struct fields and interface methods.
|
|
||||||
|
|
||||||
default:
|
|
||||||
panic(obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
// 4. Search the API for the path to the var (field/param/result) or method.
|
|
||||||
|
|
||||||
// First inspect package-level named types.
|
|
||||||
// In the presence of path aliases, these give
|
|
||||||
// the best paths because non-types may
|
|
||||||
// refer to types, but not the reverse.
|
|
||||||
empty := make([]byte, 0, 48) // initial space
|
|
||||||
names := scope.Names()
|
|
||||||
for _, name := range names {
|
|
||||||
o := scope.Lookup(name)
|
|
||||||
tname, ok := o.(*types.TypeName)
|
|
||||||
if !ok {
|
|
||||||
continue // handle non-types in second pass
|
|
||||||
}
|
|
||||||
|
|
||||||
path := append(empty, name...)
|
|
||||||
path = append(path, opType)
|
|
||||||
|
|
||||||
T := o.Type()
|
|
||||||
|
|
||||||
if tname.IsAlias() {
|
|
||||||
// type alias
|
|
||||||
if r := find(obj, T, path); r != nil {
|
|
||||||
return Path(r), nil
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// defined (named) type
|
|
||||||
if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil {
|
|
||||||
return Path(r), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Then inspect everything else:
|
|
||||||
// non-types, and declared methods of defined types.
|
|
||||||
for _, name := range names {
|
|
||||||
o := scope.Lookup(name)
|
|
||||||
path := append(empty, name...)
|
|
||||||
if _, ok := o.(*types.TypeName); !ok {
|
|
||||||
if o.Exported() {
|
|
||||||
// exported non-type (const, var, func)
|
|
||||||
if r := find(obj, o.Type(), append(path, opType)); r != nil {
|
|
||||||
return Path(r), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inspect declared methods of defined types.
|
|
||||||
if T, ok := o.Type().(*types.Named); ok {
|
|
||||||
path = append(path, opType)
|
|
||||||
for i := 0; i < T.NumMethods(); i++ {
|
|
||||||
m := T.Method(i)
|
|
||||||
path2 := appendOpArg(path, opMethod, i)
|
|
||||||
if m == obj {
|
|
||||||
return Path(path2), nil // found declared method
|
|
||||||
}
|
|
||||||
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
|
|
||||||
return Path(r), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
|
|
||||||
}
|
|
||||||
|
|
||||||
func appendOpArg(path []byte, op byte, arg int) []byte {
|
|
||||||
path = append(path, op)
|
|
||||||
path = strconv.AppendInt(path, int64(arg), 10)
|
|
||||||
return path
|
|
||||||
}
|
|
||||||
|
|
||||||
// find finds obj within type T, returning the path to it, or nil if not found.
|
|
||||||
func find(obj types.Object, T types.Type, path []byte) []byte {
|
|
||||||
switch T := T.(type) {
|
|
||||||
case *types.Basic, *types.Named:
|
|
||||||
// Named types belonging to pkg were handled already,
|
|
||||||
// so T must belong to another package. No path.
|
|
||||||
return nil
|
|
||||||
case *types.Pointer:
|
|
||||||
return find(obj, T.Elem(), append(path, opElem))
|
|
||||||
case *types.Slice:
|
|
||||||
return find(obj, T.Elem(), append(path, opElem))
|
|
||||||
case *types.Array:
|
|
||||||
return find(obj, T.Elem(), append(path, opElem))
|
|
||||||
case *types.Chan:
|
|
||||||
return find(obj, T.Elem(), append(path, opElem))
|
|
||||||
case *types.Map:
|
|
||||||
if r := find(obj, T.Key(), append(path, opKey)); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
return find(obj, T.Elem(), append(path, opElem))
|
|
||||||
case *types.Signature:
|
|
||||||
if r := find(obj, T.Params(), append(path, opParams)); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
return find(obj, T.Results(), append(path, opResults))
|
|
||||||
case *types.Struct:
|
|
||||||
for i := 0; i < T.NumFields(); i++ {
|
|
||||||
f := T.Field(i)
|
|
||||||
path2 := appendOpArg(path, opField, i)
|
|
||||||
if f == obj {
|
|
||||||
return path2 // found field var
|
|
||||||
}
|
|
||||||
if r := find(obj, f.Type(), append(path2, opType)); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
case *types.Tuple:
|
|
||||||
for i := 0; i < T.Len(); i++ {
|
|
||||||
v := T.At(i)
|
|
||||||
path2 := appendOpArg(path, opAt, i)
|
|
||||||
if v == obj {
|
|
||||||
return path2 // found param/result var
|
|
||||||
}
|
|
||||||
if r := find(obj, v.Type(), append(path2, opType)); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
case *types.Interface:
|
|
||||||
for i := 0; i < T.NumMethods(); i++ {
|
|
||||||
m := T.Method(i)
|
|
||||||
path2 := appendOpArg(path, opMethod, i)
|
|
||||||
if m == obj {
|
|
||||||
return path2 // found interface method
|
|
||||||
}
|
|
||||||
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
panic(T)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Object returns the object denoted by path p within the package pkg.
|
|
||||||
func Object(pkg *types.Package, p Path) (types.Object, error) {
|
|
||||||
if p == "" {
|
|
||||||
return nil, fmt.Errorf("empty path")
|
|
||||||
}
|
|
||||||
|
|
||||||
pathstr := string(p)
|
|
||||||
var pkgobj, suffix string
|
|
||||||
if dot := strings.IndexByte(pathstr, opType); dot < 0 {
|
|
||||||
pkgobj = pathstr
|
|
||||||
} else {
|
|
||||||
pkgobj = pathstr[:dot]
|
|
||||||
suffix = pathstr[dot:] // suffix starts with "."
|
|
||||||
}
|
|
||||||
|
|
||||||
obj := pkg.Scope().Lookup(pkgobj)
|
|
||||||
if obj == nil {
|
|
||||||
return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
|
|
||||||
}
|
|
||||||
|
|
||||||
// abstraction of *types.{Pointer,Slice,Array,Chan,Map}
|
|
||||||
type hasElem interface {
|
|
||||||
Elem() types.Type
|
|
||||||
}
|
|
||||||
// abstraction of *types.{Interface,Named}
|
|
||||||
type hasMethods interface {
|
|
||||||
Method(int) *types.Func
|
|
||||||
NumMethods() int
|
|
||||||
}
|
|
||||||
|
|
||||||
// The loop state is the pair (t, obj),
|
|
||||||
// exactly one of which is non-nil, initially obj.
|
|
||||||
// All suffixes start with '.' (the only object->type operation),
|
|
||||||
// followed by optional type->type operations,
|
|
||||||
// then a type->object operation.
|
|
||||||
// The cycle then repeats.
|
|
||||||
var t types.Type
|
|
||||||
for suffix != "" {
|
|
||||||
code := suffix[0]
|
|
||||||
suffix = suffix[1:]
|
|
||||||
|
|
||||||
// Codes [AFM] have an integer operand.
|
|
||||||
var index int
|
|
||||||
switch code {
|
|
||||||
case opAt, opField, opMethod:
|
|
||||||
rest := strings.TrimLeft(suffix, "0123456789")
|
|
||||||
numerals := suffix[:len(suffix)-len(rest)]
|
|
||||||
suffix = rest
|
|
||||||
i, err := strconv.Atoi(numerals)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
|
|
||||||
}
|
|
||||||
index = int(i)
|
|
||||||
case opObj:
|
|
||||||
// no operand
|
|
||||||
default:
|
|
||||||
// The suffix must end with a type->object operation.
|
|
||||||
if suffix == "" {
|
|
||||||
return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if code == opType {
|
|
||||||
if t != nil {
|
|
||||||
return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
|
|
||||||
}
|
|
||||||
t = obj.Type()
|
|
||||||
obj = nil
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if t == nil {
|
|
||||||
return nil, fmt.Errorf("invalid path: code %q in object context", code)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Inv: t != nil, obj == nil
|
|
||||||
|
|
||||||
switch code {
|
|
||||||
case opElem:
|
|
||||||
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
|
|
||||||
}
|
|
||||||
t = hasElem.Elem()
|
|
||||||
|
|
||||||
case opKey:
|
|
||||||
mapType, ok := t.(*types.Map)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
|
|
||||||
}
|
|
||||||
t = mapType.Key()
|
|
||||||
|
|
||||||
case opParams:
|
|
||||||
sig, ok := t.(*types.Signature)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
|
|
||||||
}
|
|
||||||
t = sig.Params()
|
|
||||||
|
|
||||||
case opResults:
|
|
||||||
sig, ok := t.(*types.Signature)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
|
|
||||||
}
|
|
||||||
t = sig.Results()
|
|
||||||
|
|
||||||
case opUnderlying:
|
|
||||||
named, ok := t.(*types.Named)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
|
|
||||||
}
|
|
||||||
t = named.Underlying()
|
|
||||||
|
|
||||||
case opAt:
|
|
||||||
tuple, ok := t.(*types.Tuple)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t)
|
|
||||||
}
|
|
||||||
if n := tuple.Len(); index >= n {
|
|
||||||
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
|
|
||||||
}
|
|
||||||
obj = tuple.At(index)
|
|
||||||
t = nil
|
|
||||||
|
|
||||||
case opField:
|
|
||||||
structType, ok := t.(*types.Struct)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
|
|
||||||
}
|
|
||||||
if n := structType.NumFields(); index >= n {
|
|
||||||
return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
|
|
||||||
}
|
|
||||||
obj = structType.Field(index)
|
|
||||||
t = nil
|
|
||||||
|
|
||||||
case opMethod:
|
|
||||||
hasMethods, ok := t.(hasMethods) // Interface or Named
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t)
|
|
||||||
}
|
|
||||||
if n := hasMethods.NumMethods(); index >= n {
|
|
||||||
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n)
|
|
||||||
}
|
|
||||||
obj = hasMethods.Method(index)
|
|
||||||
t = nil
|
|
||||||
|
|
||||||
case opObj:
|
|
||||||
named, ok := t.(*types.Named)
|
|
||||||
if !ok {
|
|
||||||
return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
|
|
||||||
}
|
|
||||||
obj = named.Obj()
|
|
||||||
t = nil
|
|
||||||
|
|
||||||
default:
|
|
||||||
return nil, fmt.Errorf("invalid path: unknown code %q", code)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if obj.Pkg() != pkg {
|
|
||||||
return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
|
|
||||||
}
|
|
||||||
|
|
||||||
return obj, nil // success
|
|
||||||
}
|
|
46
vendor/golang.org/x/tools/go/types/typeutil/callee.go
generated
vendored
46
vendor/golang.org/x/tools/go/types/typeutil/callee.go
generated
vendored
|
@ -1,46 +0,0 @@
|
||||||
// Copyright 2018 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package typeutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/ast"
|
|
||||||
"go/types"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/ast/astutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Callee returns the named target of a function call, if any:
|
|
||||||
// a function, method, builtin, or variable.
|
|
||||||
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
|
|
||||||
var obj types.Object
|
|
||||||
switch fun := astutil.Unparen(call.Fun).(type) {
|
|
||||||
case *ast.Ident:
|
|
||||||
obj = info.Uses[fun] // type, var, builtin, or declared func
|
|
||||||
case *ast.SelectorExpr:
|
|
||||||
if sel, ok := info.Selections[fun]; ok {
|
|
||||||
obj = sel.Obj() // method or field
|
|
||||||
} else {
|
|
||||||
obj = info.Uses[fun.Sel] // qualified identifier?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if _, ok := obj.(*types.TypeName); ok {
|
|
||||||
return nil // T(x) is a conversion, not a call
|
|
||||||
}
|
|
||||||
return obj
|
|
||||||
}
|
|
||||||
|
|
||||||
// StaticCallee returns the target (function or method) of a static
|
|
||||||
// function call, if any. It returns nil for calls to builtins.
|
|
||||||
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
|
|
||||||
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
|
|
||||||
return f
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func interfaceMethod(f *types.Func) bool {
|
|
||||||
recv := f.Type().(*types.Signature).Recv()
|
|
||||||
return recv != nil && types.IsInterface(recv.Type())
|
|
||||||
}
|
|
31
vendor/golang.org/x/tools/go/types/typeutil/imports.go
generated
vendored
31
vendor/golang.org/x/tools/go/types/typeutil/imports.go
generated
vendored
|
@ -1,31 +0,0 @@
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package typeutil
|
|
||||||
|
|
||||||
import "go/types"
|
|
||||||
|
|
||||||
// Dependencies returns all dependencies of the specified packages.
|
|
||||||
//
|
|
||||||
// Dependent packages appear in topological order: if package P imports
|
|
||||||
// package Q, Q appears earlier than P in the result.
|
|
||||||
// The algorithm follows import statements in the order they
|
|
||||||
// appear in the source code, so the result is a total order.
|
|
||||||
//
|
|
||||||
func Dependencies(pkgs ...*types.Package) []*types.Package {
|
|
||||||
var result []*types.Package
|
|
||||||
seen := make(map[*types.Package]bool)
|
|
||||||
var visit func(pkgs []*types.Package)
|
|
||||||
visit = func(pkgs []*types.Package) {
|
|
||||||
for _, p := range pkgs {
|
|
||||||
if !seen[p] {
|
|
||||||
seen[p] = true
|
|
||||||
visit(p.Imports())
|
|
||||||
result = append(result, p)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
visit(pkgs)
|
|
||||||
return result
|
|
||||||
}
|
|
313
vendor/golang.org/x/tools/go/types/typeutil/map.go
generated
vendored
313
vendor/golang.org/x/tools/go/types/typeutil/map.go
generated
vendored
|
@ -1,313 +0,0 @@
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package typeutil defines various utilities for types, such as Map,
|
|
||||||
// a mapping from types.Type to interface{} values.
|
|
||||||
package typeutil // import "golang.org/x/tools/go/types/typeutil"
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"go/types"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Map is a hash-table-based mapping from types (types.Type) to
|
|
||||||
// arbitrary interface{} values. The concrete types that implement
|
|
||||||
// the Type interface are pointers. Since they are not canonicalized,
|
|
||||||
// == cannot be used to check for equivalence, and thus we cannot
|
|
||||||
// simply use a Go map.
|
|
||||||
//
|
|
||||||
// Just as with map[K]V, a nil *Map is a valid empty map.
|
|
||||||
//
|
|
||||||
// Not thread-safe.
|
|
||||||
//
|
|
||||||
type Map struct {
|
|
||||||
hasher Hasher // shared by many Maps
|
|
||||||
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
|
|
||||||
length int // number of map entries
|
|
||||||
}
|
|
||||||
|
|
||||||
// entry is an entry (key/value association) in a hash bucket.
|
|
||||||
type entry struct {
|
|
||||||
key types.Type
|
|
||||||
value interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetHasher sets the hasher used by Map.
|
|
||||||
//
|
|
||||||
// All Hashers are functionally equivalent but contain internal state
|
|
||||||
// used to cache the results of hashing previously seen types.
|
|
||||||
//
|
|
||||||
// A single Hasher created by MakeHasher() may be shared among many
|
|
||||||
// Maps. This is recommended if the instances have many keys in
|
|
||||||
// common, as it will amortize the cost of hash computation.
|
|
||||||
//
|
|
||||||
// A Hasher may grow without bound as new types are seen. Even when a
|
|
||||||
// type is deleted from the map, the Hasher never shrinks, since other
|
|
||||||
// types in the map may reference the deleted type indirectly.
|
|
||||||
//
|
|
||||||
// Hashers are not thread-safe, and read-only operations such as
|
|
||||||
// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
|
|
||||||
// read-lock) is require around all Map operations if a shared
|
|
||||||
// hasher is accessed from multiple threads.
|
|
||||||
//
|
|
||||||
// If SetHasher is not called, the Map will create a private hasher at
|
|
||||||
// the first call to Insert.
|
|
||||||
//
|
|
||||||
func (m *Map) SetHasher(hasher Hasher) {
|
|
||||||
m.hasher = hasher
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes the entry with the given key, if any.
|
|
||||||
// It returns true if the entry was found.
|
|
||||||
//
|
|
||||||
func (m *Map) Delete(key types.Type) bool {
|
|
||||||
if m != nil && m.table != nil {
|
|
||||||
hash := m.hasher.Hash(key)
|
|
||||||
bucket := m.table[hash]
|
|
||||||
for i, e := range bucket {
|
|
||||||
if e.key != nil && types.Identical(key, e.key) {
|
|
||||||
// We can't compact the bucket as it
|
|
||||||
// would disturb iterators.
|
|
||||||
bucket[i] = entry{}
|
|
||||||
m.length--
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// At returns the map entry for the given key.
|
|
||||||
// The result is nil if the entry is not present.
|
|
||||||
//
|
|
||||||
func (m *Map) At(key types.Type) interface{} {
|
|
||||||
if m != nil && m.table != nil {
|
|
||||||
for _, e := range m.table[m.hasher.Hash(key)] {
|
|
||||||
if e.key != nil && types.Identical(key, e.key) {
|
|
||||||
return e.value
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets the map entry for key to val,
|
|
||||||
// and returns the previous entry, if any.
|
|
||||||
func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
|
|
||||||
if m.table != nil {
|
|
||||||
hash := m.hasher.Hash(key)
|
|
||||||
bucket := m.table[hash]
|
|
||||||
var hole *entry
|
|
||||||
for i, e := range bucket {
|
|
||||||
if e.key == nil {
|
|
||||||
hole = &bucket[i]
|
|
||||||
} else if types.Identical(key, e.key) {
|
|
||||||
prev = e.value
|
|
||||||
bucket[i].value = value
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if hole != nil {
|
|
||||||
*hole = entry{key, value} // overwrite deleted entry
|
|
||||||
} else {
|
|
||||||
m.table[hash] = append(bucket, entry{key, value})
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if m.hasher.memo == nil {
|
|
||||||
m.hasher = MakeHasher()
|
|
||||||
}
|
|
||||||
hash := m.hasher.Hash(key)
|
|
||||||
m.table = map[uint32][]entry{hash: {entry{key, value}}}
|
|
||||||
}
|
|
||||||
|
|
||||||
m.length++
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the number of map entries.
|
|
||||||
func (m *Map) Len() int {
|
|
||||||
if m != nil {
|
|
||||||
return m.length
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Iterate calls function f on each entry in the map in unspecified order.
|
|
||||||
//
|
|
||||||
// If f should mutate the map, Iterate provides the same guarantees as
|
|
||||||
// Go maps: if f deletes a map entry that Iterate has not yet reached,
|
|
||||||
// f will not be invoked for it, but if f inserts a map entry that
|
|
||||||
// Iterate has not yet reached, whether or not f will be invoked for
|
|
||||||
// it is unspecified.
|
|
||||||
//
|
|
||||||
func (m *Map) Iterate(f func(key types.Type, value interface{})) {
|
|
||||||
if m != nil {
|
|
||||||
for _, bucket := range m.table {
|
|
||||||
for _, e := range bucket {
|
|
||||||
if e.key != nil {
|
|
||||||
f(e.key, e.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Keys returns a new slice containing the set of map keys.
|
|
||||||
// The order is unspecified.
|
|
||||||
func (m *Map) Keys() []types.Type {
|
|
||||||
keys := make([]types.Type, 0, m.Len())
|
|
||||||
m.Iterate(func(key types.Type, _ interface{}) {
|
|
||||||
keys = append(keys, key)
|
|
||||||
})
|
|
||||||
return keys
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *Map) toString(values bool) string {
|
|
||||||
if m == nil {
|
|
||||||
return "{}"
|
|
||||||
}
|
|
||||||
var buf bytes.Buffer
|
|
||||||
fmt.Fprint(&buf, "{")
|
|
||||||
sep := ""
|
|
||||||
m.Iterate(func(key types.Type, value interface{}) {
|
|
||||||
fmt.Fprint(&buf, sep)
|
|
||||||
sep = ", "
|
|
||||||
fmt.Fprint(&buf, key)
|
|
||||||
if values {
|
|
||||||
fmt.Fprintf(&buf, ": %q", value)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
fmt.Fprint(&buf, "}")
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns a string representation of the map's entries.
|
|
||||||
// Values are printed using fmt.Sprintf("%v", v).
|
|
||||||
// Order is unspecified.
|
|
||||||
//
|
|
||||||
func (m *Map) String() string {
|
|
||||||
return m.toString(true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// KeysString returns a string representation of the map's key set.
|
|
||||||
// Order is unspecified.
|
|
||||||
//
|
|
||||||
func (m *Map) KeysString() string {
|
|
||||||
return m.toString(false)
|
|
||||||
}
|
|
||||||
|
|
||||||
////////////////////////////////////////////////////////////////////////
|
|
||||||
// Hasher
|
|
||||||
|
|
||||||
// A Hasher maps each type to its hash value.
|
|
||||||
// For efficiency, a hasher uses memoization; thus its memory
|
|
||||||
// footprint grows monotonically over time.
|
|
||||||
// Hashers are not thread-safe.
|
|
||||||
// Hashers have reference semantics.
|
|
||||||
// Call MakeHasher to create a Hasher.
|
|
||||||
type Hasher struct {
|
|
||||||
memo map[types.Type]uint32
|
|
||||||
}
|
|
||||||
|
|
||||||
// MakeHasher returns a new Hasher instance.
|
|
||||||
func MakeHasher() Hasher {
|
|
||||||
return Hasher{make(map[types.Type]uint32)}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Hash computes a hash value for the given type t such that
|
|
||||||
// Identical(t, t') => Hash(t) == Hash(t').
|
|
||||||
func (h Hasher) Hash(t types.Type) uint32 {
|
|
||||||
hash, ok := h.memo[t]
|
|
||||||
if !ok {
|
|
||||||
hash = h.hashFor(t)
|
|
||||||
h.memo[t] = hash
|
|
||||||
}
|
|
||||||
return hash
|
|
||||||
}
|
|
||||||
|
|
||||||
// hashString computes the Fowler–Noll–Vo hash of s.
|
|
||||||
func hashString(s string) uint32 {
|
|
||||||
var h uint32
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
h ^= uint32(s[i])
|
|
||||||
h *= 16777619
|
|
||||||
}
|
|
||||||
return h
|
|
||||||
}
|
|
||||||
|
|
||||||
// hashFor computes the hash of t.
|
|
||||||
func (h Hasher) hashFor(t types.Type) uint32 {
|
|
||||||
// See Identical for rationale.
|
|
||||||
switch t := t.(type) {
|
|
||||||
case *types.Basic:
|
|
||||||
return uint32(t.Kind())
|
|
||||||
|
|
||||||
case *types.Array:
|
|
||||||
return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
|
|
||||||
|
|
||||||
case *types.Slice:
|
|
||||||
return 9049 + 2*h.Hash(t.Elem())
|
|
||||||
|
|
||||||
case *types.Struct:
|
|
||||||
var hash uint32 = 9059
|
|
||||||
for i, n := 0, t.NumFields(); i < n; i++ {
|
|
||||||
f := t.Field(i)
|
|
||||||
if f.Anonymous() {
|
|
||||||
hash += 8861
|
|
||||||
}
|
|
||||||
hash += hashString(t.Tag(i))
|
|
||||||
hash += hashString(f.Name()) // (ignore f.Pkg)
|
|
||||||
hash += h.Hash(f.Type())
|
|
||||||
}
|
|
||||||
return hash
|
|
||||||
|
|
||||||
case *types.Pointer:
|
|
||||||
return 9067 + 2*h.Hash(t.Elem())
|
|
||||||
|
|
||||||
case *types.Signature:
|
|
||||||
var hash uint32 = 9091
|
|
||||||
if t.Variadic() {
|
|
||||||
hash *= 8863
|
|
||||||
}
|
|
||||||
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
|
|
||||||
|
|
||||||
case *types.Interface:
|
|
||||||
var hash uint32 = 9103
|
|
||||||
for i, n := 0, t.NumMethods(); i < n; i++ {
|
|
||||||
// See go/types.identicalMethods for rationale.
|
|
||||||
// Method order is not significant.
|
|
||||||
// Ignore m.Pkg().
|
|
||||||
m := t.Method(i)
|
|
||||||
hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
|
|
||||||
}
|
|
||||||
return hash
|
|
||||||
|
|
||||||
case *types.Map:
|
|
||||||
return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
|
|
||||||
|
|
||||||
case *types.Chan:
|
|
||||||
return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
|
|
||||||
|
|
||||||
case *types.Named:
|
|
||||||
// Not safe with a copying GC; objects may move.
|
|
||||||
return uint32(reflect.ValueOf(t.Obj()).Pointer())
|
|
||||||
|
|
||||||
case *types.Tuple:
|
|
||||||
return h.hashTuple(t)
|
|
||||||
}
|
|
||||||
panic(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
|
|
||||||
// See go/types.identicalTypes for rationale.
|
|
||||||
n := tuple.Len()
|
|
||||||
var hash uint32 = 9137 + 2*uint32(n)
|
|
||||||
for i := 0; i < n; i++ {
|
|
||||||
hash += 3 * h.Hash(tuple.At(i).Type())
|
|
||||||
}
|
|
||||||
return hash
|
|
||||||
}
|
|
72
vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
generated
vendored
72
vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
generated
vendored
|
@ -1,72 +0,0 @@
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// This file implements a cache of method sets.
|
|
||||||
|
|
||||||
package typeutil
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/types"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A MethodSetCache records the method set of each type T for which
|
|
||||||
// MethodSet(T) is called so that repeat queries are fast.
|
|
||||||
// The zero value is a ready-to-use cache instance.
|
|
||||||
type MethodSetCache struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
|
|
||||||
others map[types.Type]*types.MethodSet // all other types
|
|
||||||
}
|
|
||||||
|
|
||||||
// MethodSet returns the method set of type T. It is thread-safe.
|
|
||||||
//
|
|
||||||
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
|
|
||||||
// Utility functions can thus expose an optional *MethodSetCache
|
|
||||||
// parameter to clients that care about performance.
|
|
||||||
//
|
|
||||||
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
|
|
||||||
if cache == nil {
|
|
||||||
return types.NewMethodSet(T)
|
|
||||||
}
|
|
||||||
cache.mu.Lock()
|
|
||||||
defer cache.mu.Unlock()
|
|
||||||
|
|
||||||
switch T := T.(type) {
|
|
||||||
case *types.Named:
|
|
||||||
return cache.lookupNamed(T).value
|
|
||||||
|
|
||||||
case *types.Pointer:
|
|
||||||
if N, ok := T.Elem().(*types.Named); ok {
|
|
||||||
return cache.lookupNamed(N).pointer
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// all other types
|
|
||||||
// (The map uses pointer equivalence, not type identity.)
|
|
||||||
mset := cache.others[T]
|
|
||||||
if mset == nil {
|
|
||||||
mset = types.NewMethodSet(T)
|
|
||||||
if cache.others == nil {
|
|
||||||
cache.others = make(map[types.Type]*types.MethodSet)
|
|
||||||
}
|
|
||||||
cache.others[T] = mset
|
|
||||||
}
|
|
||||||
return mset
|
|
||||||
}
|
|
||||||
|
|
||||||
func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
|
|
||||||
if cache.named == nil {
|
|
||||||
cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
|
|
||||||
}
|
|
||||||
// Avoid recomputing mset(*T) for each distinct Pointer
|
|
||||||
// instance whose underlying type is a named type.
|
|
||||||
msets, ok := cache.named[named]
|
|
||||||
if !ok {
|
|
||||||
msets.value = types.NewMethodSet(named)
|
|
||||||
msets.pointer = types.NewMethodSet(types.NewPointer(named))
|
|
||||||
cache.named[named] = msets
|
|
||||||
}
|
|
||||||
return msets
|
|
||||||
}
|
|
52
vendor/golang.org/x/tools/go/types/typeutil/ui.go
generated
vendored
52
vendor/golang.org/x/tools/go/types/typeutil/ui.go
generated
vendored
|
@ -1,52 +0,0 @@
|
||||||
// Copyright 2014 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package typeutil
|
|
||||||
|
|
||||||
// This file defines utilities for user interfaces that display types.
|
|
||||||
|
|
||||||
import "go/types"
|
|
||||||
|
|
||||||
// IntuitiveMethodSet returns the intuitive method set of a type T,
|
|
||||||
// which is the set of methods you can call on an addressable value of
|
|
||||||
// that type.
|
|
||||||
//
|
|
||||||
// The result always contains MethodSet(T), and is exactly MethodSet(T)
|
|
||||||
// for interface types and for pointer-to-concrete types.
|
|
||||||
// For all other concrete types T, the result additionally
|
|
||||||
// contains each method belonging to *T if there is no identically
|
|
||||||
// named method on T itself.
|
|
||||||
//
|
|
||||||
// This corresponds to user intuition about method sets;
|
|
||||||
// this function is intended only for user interfaces.
|
|
||||||
//
|
|
||||||
// The order of the result is as for types.MethodSet(T).
|
|
||||||
//
|
|
||||||
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
|
|
||||||
isPointerToConcrete := func(T types.Type) bool {
|
|
||||||
ptr, ok := T.(*types.Pointer)
|
|
||||||
return ok && !types.IsInterface(ptr.Elem())
|
|
||||||
}
|
|
||||||
|
|
||||||
var result []*types.Selection
|
|
||||||
mset := msets.MethodSet(T)
|
|
||||||
if types.IsInterface(T) || isPointerToConcrete(T) {
|
|
||||||
for i, n := 0, mset.Len(); i < n; i++ {
|
|
||||||
result = append(result, mset.At(i))
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// T is some other concrete type.
|
|
||||||
// Report methods of T and *T, preferring those of T.
|
|
||||||
pmset := msets.MethodSet(types.NewPointer(T))
|
|
||||||
for i, n := 0, pmset.Len(); i < n; i++ {
|
|
||||||
meth := pmset.At(i)
|
|
||||||
if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
|
|
||||||
meth = m
|
|
||||||
}
|
|
||||||
result = append(result, meth)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
118
vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
generated
vendored
118
vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
generated
vendored
|
@ -1,118 +0,0 @@
|
||||||
// Copyright 2020 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package analysisinternal exposes internal-only fields from go/analysis.
|
|
||||||
package analysisinternal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"go/ast"
|
|
||||||
"go/token"
|
|
||||||
"go/types"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"golang.org/x/tools/go/ast/astutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos {
|
|
||||||
// Get the end position for the type error.
|
|
||||||
offset, end := fset.PositionFor(start, false).Offset, start
|
|
||||||
if offset >= len(src) {
|
|
||||||
return end
|
|
||||||
}
|
|
||||||
if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 {
|
|
||||||
end = start + token.Pos(width)
|
|
||||||
}
|
|
||||||
return end
|
|
||||||
}
|
|
||||||
|
|
||||||
func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
|
|
||||||
under := typ
|
|
||||||
if n, ok := typ.(*types.Named); ok {
|
|
||||||
under = n.Underlying()
|
|
||||||
}
|
|
||||||
switch u := under.(type) {
|
|
||||||
case *types.Basic:
|
|
||||||
switch {
|
|
||||||
case u.Info()&types.IsNumeric != 0:
|
|
||||||
return &ast.BasicLit{Kind: token.INT, Value: "0"}
|
|
||||||
case u.Info()&types.IsBoolean != 0:
|
|
||||||
return &ast.Ident{Name: "false"}
|
|
||||||
case u.Info()&types.IsString != 0:
|
|
||||||
return &ast.BasicLit{Kind: token.STRING, Value: `""`}
|
|
||||||
default:
|
|
||||||
panic("unknown basic type")
|
|
||||||
}
|
|
||||||
case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice:
|
|
||||||
return ast.NewIdent("nil")
|
|
||||||
case *types.Struct:
|
|
||||||
texpr := typeExpr(fset, f, pkg, typ) // typ because we want the name here.
|
|
||||||
if texpr == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &ast.CompositeLit{
|
|
||||||
Type: texpr,
|
|
||||||
}
|
|
||||||
case *types.Array:
|
|
||||||
texpr := typeExpr(fset, f, pkg, u.Elem())
|
|
||||||
if texpr == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return &ast.CompositeLit{
|
|
||||||
Type: &ast.ArrayType{
|
|
||||||
Elt: texpr,
|
|
||||||
Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprintf("%v", u.Len())},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func typeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
|
|
||||||
switch t := typ.(type) {
|
|
||||||
case *types.Basic:
|
|
||||||
switch t.Kind() {
|
|
||||||
case types.UnsafePointer:
|
|
||||||
return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
|
|
||||||
default:
|
|
||||||
return ast.NewIdent(t.Name())
|
|
||||||
}
|
|
||||||
case *types.Named:
|
|
||||||
if t.Obj().Pkg() == pkg {
|
|
||||||
return ast.NewIdent(t.Obj().Name())
|
|
||||||
}
|
|
||||||
pkgName := t.Obj().Pkg().Name()
|
|
||||||
// If the file already imports the package under another name, use that.
|
|
||||||
for _, group := range astutil.Imports(fset, f) {
|
|
||||||
for _, cand := range group {
|
|
||||||
if strings.Trim(cand.Path.Value, `"`) == t.Obj().Pkg().Path() {
|
|
||||||
if cand.Name != nil && cand.Name.Name != "" {
|
|
||||||
pkgName = cand.Name.Name
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if pkgName == "." {
|
|
||||||
return ast.NewIdent(t.Obj().Name())
|
|
||||||
}
|
|
||||||
return &ast.SelectorExpr{
|
|
||||||
X: ast.NewIdent(pkgName),
|
|
||||||
Sel: ast.NewIdent(t.Obj().Name()),
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
return nil // TODO: anonymous structs, but who does that
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var GetTypeErrors = func(p interface{}) []types.Error { return nil }
|
|
||||||
var SetTypeErrors = func(p interface{}, errors []types.Error) {}
|
|
||||||
|
|
||||||
type TypeErrorPass string
|
|
||||||
|
|
||||||
const (
|
|
||||||
NoNewVars TypeErrorPass = "nonewvars"
|
|
||||||
NoResultValues TypeErrorPass = "noresultvalues"
|
|
||||||
UndeclaredName TypeErrorPass = "undeclaredname"
|
|
||||||
)
|
|
116
vendor/golang.org/x/tools/internal/gocommand/invoke.go
generated
vendored
116
vendor/golang.org/x/tools/internal/gocommand/invoke.go
generated
vendored
|
@ -23,9 +23,24 @@ import (
|
||||||
// An Runner will run go command invocations and serialize
|
// An Runner will run go command invocations and serialize
|
||||||
// them if it sees a concurrency error.
|
// them if it sees a concurrency error.
|
||||||
type Runner struct {
|
type Runner struct {
|
||||||
// LoadMu guards packages.Load calls and associated state.
|
// once guards the runner initialization.
|
||||||
loadMu sync.Mutex
|
once sync.Once
|
||||||
serializeLoads int
|
|
||||||
|
// inFlight tracks available workers.
|
||||||
|
inFlight chan struct{}
|
||||||
|
|
||||||
|
// serialized guards the ability to run a go command serially,
|
||||||
|
// to avoid deadlocks when claiming workers.
|
||||||
|
serialized chan struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
const maxInFlight = 10
|
||||||
|
|
||||||
|
func (runner *Runner) initialize() {
|
||||||
|
runner.once.Do(func() {
|
||||||
|
runner.inFlight = make(chan struct{}, maxInFlight)
|
||||||
|
runner.serialized = make(chan struct{}, 1)
|
||||||
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// 1.13: go: updates to go.mod needed, but contents have changed
|
// 1.13: go: updates to go.mod needed, but contents have changed
|
||||||
|
@ -35,7 +50,7 @@ var modConcurrencyError = regexp.MustCompile(`go:.*go.mod.*contents have changed
|
||||||
// Run is a convenience wrapper around RunRaw.
|
// Run is a convenience wrapper around RunRaw.
|
||||||
// It returns only stdout and a "friendly" error.
|
// It returns only stdout and a "friendly" error.
|
||||||
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
|
func (runner *Runner) Run(ctx context.Context, inv Invocation) (*bytes.Buffer, error) {
|
||||||
stdout, _, friendly, _ := runner.runRaw(ctx, inv)
|
stdout, _, friendly, _ := runner.RunRaw(ctx, inv)
|
||||||
return stdout, friendly
|
return stdout, friendly
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -49,55 +64,65 @@ func (runner *Runner) RunPiped(ctx context.Context, inv Invocation, stdout, stde
|
||||||
// RunRaw runs the invocation, serializing requests only if they fight over
|
// RunRaw runs the invocation, serializing requests only if they fight over
|
||||||
// go.mod changes.
|
// go.mod changes.
|
||||||
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
|
func (runner *Runner) RunRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
|
||||||
return runner.runRaw(ctx, inv)
|
// Make sure the runner is always initialized.
|
||||||
}
|
runner.initialize()
|
||||||
|
|
||||||
func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
|
// First, try to run the go command concurrently.
|
||||||
runner.loadMu.Lock()
|
stdout, stderr, friendlyErr, err := runner.runConcurrent(ctx, inv)
|
||||||
runner.serializeLoads++
|
|
||||||
|
|
||||||
defer func() {
|
// If we encounter a load concurrency error, we need to retry serially.
|
||||||
runner.serializeLoads--
|
|
||||||
runner.loadMu.Unlock()
|
|
||||||
}()
|
|
||||||
|
|
||||||
return inv.runWithFriendlyError(ctx, stdout, stderr)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (runner *Runner) runRaw(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
|
|
||||||
// We want to run invocations concurrently as much as possible. However,
|
|
||||||
// if go.mod updates are needed, only one can make them and the others will
|
|
||||||
// fail. We need to retry in those cases, but we don't want to thrash so
|
|
||||||
// badly we never recover. To avoid that, once we've seen one concurrency
|
|
||||||
// error, start serializing everything until the backlog has cleared out.
|
|
||||||
runner.loadMu.Lock()
|
|
||||||
var locked bool // If true, we hold the mutex and have incremented.
|
|
||||||
if runner.serializeLoads == 0 {
|
|
||||||
runner.loadMu.Unlock()
|
|
||||||
} else {
|
|
||||||
locked = true
|
|
||||||
runner.serializeLoads++
|
|
||||||
}
|
|
||||||
defer func() {
|
|
||||||
if locked {
|
|
||||||
locked = false
|
|
||||||
runner.serializeLoads--
|
|
||||||
runner.loadMu.Unlock()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
for {
|
|
||||||
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
|
|
||||||
friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr)
|
|
||||||
if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) {
|
if friendlyErr == nil || !modConcurrencyError.MatchString(friendlyErr.Error()) {
|
||||||
return stdout, stderr, friendlyErr, err
|
return stdout, stderr, friendlyErr, err
|
||||||
}
|
}
|
||||||
event.Error(ctx, "Load concurrency error, will retry serially", err)
|
event.Error(ctx, "Load concurrency error, will retry serially", err)
|
||||||
if !locked {
|
|
||||||
runner.loadMu.Lock()
|
// Run serially by calling runPiped.
|
||||||
runner.serializeLoads++
|
stdout.Reset()
|
||||||
|
stderr.Reset()
|
||||||
|
friendlyErr, err = runner.runPiped(ctx, inv, stdout, stderr)
|
||||||
|
return stdout, stderr, friendlyErr, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *Runner) runConcurrent(ctx context.Context, inv Invocation) (*bytes.Buffer, *bytes.Buffer, error, error) {
|
||||||
|
// Wait for 1 worker to become available.
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil, nil, nil, ctx.Err()
|
||||||
|
case runner.inFlight <- struct{}{}:
|
||||||
|
defer func() { <-runner.inFlight }()
|
||||||
|
}
|
||||||
|
|
||||||
|
stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
|
||||||
|
friendlyErr, err := inv.runWithFriendlyError(ctx, stdout, stderr)
|
||||||
|
return stdout, stderr, friendlyErr, err
|
||||||
|
}
|
||||||
|
|
||||||
|
func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stderr io.Writer) (error, error) {
|
||||||
|
// Make sure the runner is always initialized.
|
||||||
|
runner.initialize()
|
||||||
|
|
||||||
|
// Acquire the serialization lock. This avoids deadlocks between two
|
||||||
|
// runPiped commands.
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil, ctx.Err()
|
||||||
|
case runner.serialized <- struct{}{}:
|
||||||
|
defer func() { <-runner.serialized }()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Wait for all in-progress go commands to return before proceeding,
|
||||||
|
// to avoid load concurrency errors.
|
||||||
|
for i := 0; i < maxInFlight; i++ {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return nil, ctx.Err()
|
||||||
|
case runner.inFlight <- struct{}{}:
|
||||||
|
// Make sure we always "return" any workers we took.
|
||||||
|
defer func() { <-runner.inFlight }()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return inv.runWithFriendlyError(ctx, stdout, stderr)
|
||||||
}
|
}
|
||||||
|
|
||||||
// An Invocation represents a call to the go command.
|
// An Invocation represents a call to the go command.
|
||||||
|
@ -160,7 +185,6 @@ func (i *Invocation) run(ctx context.Context, stdout, stderr io.Writer) error {
|
||||||
cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
|
cmd.Env = append(cmd.Env, "PWD="+i.WorkingDir)
|
||||||
cmd.Dir = i.WorkingDir
|
cmd.Dir = i.WorkingDir
|
||||||
}
|
}
|
||||||
|
|
||||||
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
|
defer func(start time.Time) { log("%s for %v", time.Since(start), cmdDebugStr(cmd)) }(time.Now())
|
||||||
|
|
||||||
return runCmdContext(ctx, cmd)
|
return runCmdContext(ctx, cmd)
|
||||||
|
|
84
vendor/golang.org/x/tools/internal/imports/fix.go
generated
vendored
84
vendor/golang.org/x/tools/internal/imports/fix.go
generated
vendored
|
@ -7,6 +7,7 @@ package imports
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"go/ast"
|
"go/ast"
|
||||||
"go/build"
|
"go/build"
|
||||||
|
@ -598,7 +599,7 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena
|
||||||
// Start off with the standard library.
|
// Start off with the standard library.
|
||||||
for importPath, exports := range stdlib {
|
for importPath, exports := range stdlib {
|
||||||
p := &pkg{
|
p := &pkg{
|
||||||
dir: filepath.Join(env.GOROOT, "src", importPath),
|
dir: filepath.Join(env.goroot(), "src", importPath),
|
||||||
importPathShort: importPath,
|
importPathShort: importPath,
|
||||||
packageName: path.Base(importPath),
|
packageName: path.Base(importPath),
|
||||||
relevance: MaxRelevance,
|
relevance: MaxRelevance,
|
||||||
|
@ -743,6 +744,8 @@ func getPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
|
||||||
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
|
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var RequiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB"}
|
||||||
|
|
||||||
// ProcessEnv contains environment variables and settings that affect the use of
|
// ProcessEnv contains environment variables and settings that affect the use of
|
||||||
// the go command, the go/build package, etc.
|
// the go command, the go/build package, etc.
|
||||||
type ProcessEnv struct {
|
type ProcessEnv struct {
|
||||||
|
@ -752,9 +755,12 @@ type ProcessEnv struct {
|
||||||
|
|
||||||
BuildFlags []string
|
BuildFlags []string
|
||||||
|
|
||||||
// If non-empty, these will be used instead of the
|
// Env overrides the OS environment, and can be used to specify
|
||||||
// process-wide values.
|
// GOPROXY, GO111MODULE, etc. PATH cannot be set here, because
|
||||||
GOPATH, GOROOT, GO111MODULE, GOPROXY, GOFLAGS, GOSUMDB string
|
// exec.Command will not honor it.
|
||||||
|
// Specifying all of RequiredGoEnvVars avoids a call to `go env`.
|
||||||
|
Env map[string]string
|
||||||
|
|
||||||
WorkingDir string
|
WorkingDir string
|
||||||
|
|
||||||
// If Logf is non-nil, debug logging is enabled through this function.
|
// If Logf is non-nil, debug logging is enabled through this function.
|
||||||
|
@ -763,6 +769,22 @@ type ProcessEnv struct {
|
||||||
resolver Resolver
|
resolver Resolver
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (e *ProcessEnv) goroot() string {
|
||||||
|
return e.mustGetEnv("GOROOT")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ProcessEnv) gopath() string {
|
||||||
|
return e.mustGetEnv("GOPATH")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ProcessEnv) mustGetEnv(k string) string {
|
||||||
|
v, ok := e.Env[k]
|
||||||
|
if !ok {
|
||||||
|
panic(fmt.Sprintf("%v not set in evaluated environment", k))
|
||||||
|
}
|
||||||
|
return v
|
||||||
|
}
|
||||||
|
|
||||||
// CopyConfig copies the env's configuration into a new env.
|
// CopyConfig copies the env's configuration into a new env.
|
||||||
func (e *ProcessEnv) CopyConfig() *ProcessEnv {
|
func (e *ProcessEnv) CopyConfig() *ProcessEnv {
|
||||||
copy := *e
|
copy := *e
|
||||||
|
@ -770,22 +792,41 @@ func (e *ProcessEnv) CopyConfig() *ProcessEnv {
|
||||||
return ©
|
return ©
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (e *ProcessEnv) init() error {
|
||||||
|
foundAllRequired := true
|
||||||
|
for _, k := range RequiredGoEnvVars {
|
||||||
|
if _, ok := e.Env[k]; !ok {
|
||||||
|
foundAllRequired = false
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if foundAllRequired {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.Env == nil {
|
||||||
|
e.Env = map[string]string{}
|
||||||
|
}
|
||||||
|
|
||||||
|
goEnv := map[string]string{}
|
||||||
|
stdout, err := e.invokeGo(context.TODO(), "env", append([]string{"-json"}, RequiredGoEnvVars...)...)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if err := json.Unmarshal(stdout.Bytes(), &goEnv); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
for k, v := range goEnv {
|
||||||
|
e.Env[k] = v
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (e *ProcessEnv) env() []string {
|
func (e *ProcessEnv) env() []string {
|
||||||
env := os.Environ()
|
var env []string // the gocommand package will prepend os.Environ.
|
||||||
add := func(k, v string) {
|
for k, v := range e.Env {
|
||||||
if v != "" {
|
|
||||||
env = append(env, k+"="+v)
|
env = append(env, k+"="+v)
|
||||||
}
|
}
|
||||||
}
|
|
||||||
add("GOPATH", e.GOPATH)
|
|
||||||
add("GOROOT", e.GOROOT)
|
|
||||||
add("GO111MODULE", e.GO111MODULE)
|
|
||||||
add("GOPROXY", e.GOPROXY)
|
|
||||||
add("GOFLAGS", e.GOFLAGS)
|
|
||||||
add("GOSUMDB", e.GOSUMDB)
|
|
||||||
if e.WorkingDir != "" {
|
|
||||||
add("PWD", e.WorkingDir)
|
|
||||||
}
|
|
||||||
return env
|
return env
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -793,8 +834,7 @@ func (e *ProcessEnv) GetResolver() Resolver {
|
||||||
if e.resolver != nil {
|
if e.resolver != nil {
|
||||||
return e.resolver
|
return e.resolver
|
||||||
}
|
}
|
||||||
out, err := e.invokeGo(context.TODO(), "env", "GOMOD")
|
if len(e.Env["GOMOD"]) == 0 {
|
||||||
if err != nil || len(bytes.TrimSpace(out.Bytes())) == 0 {
|
|
||||||
e.resolver = newGopathResolver(e)
|
e.resolver = newGopathResolver(e)
|
||||||
return e.resolver
|
return e.resolver
|
||||||
}
|
}
|
||||||
|
@ -804,8 +844,8 @@ func (e *ProcessEnv) GetResolver() Resolver {
|
||||||
|
|
||||||
func (e *ProcessEnv) buildContext() *build.Context {
|
func (e *ProcessEnv) buildContext() *build.Context {
|
||||||
ctx := build.Default
|
ctx := build.Default
|
||||||
ctx.GOROOT = e.GOROOT
|
ctx.GOROOT = e.goroot()
|
||||||
ctx.GOPATH = e.GOPATH
|
ctx.GOPATH = e.gopath()
|
||||||
|
|
||||||
// As of Go 1.14, build.Context has a Dir field
|
// As of Go 1.14, build.Context has a Dir field
|
||||||
// (see golang.org/issue/34860).
|
// (see golang.org/issue/34860).
|
||||||
|
@ -839,7 +879,7 @@ func (e *ProcessEnv) invokeGo(ctx context.Context, verb string, args ...string)
|
||||||
func addStdlibCandidates(pass *pass, refs references) {
|
func addStdlibCandidates(pass *pass, refs references) {
|
||||||
add := func(pkg string) {
|
add := func(pkg string) {
|
||||||
// Prevent self-imports.
|
// Prevent self-imports.
|
||||||
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.GOROOT, "src", pkg) == pass.srcDir {
|
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(pass.env.goroot(), "src", pkg) == pass.srcDir {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
exports := copyExports(stdlib[pkg])
|
exports := copyExports(stdlib[pkg])
|
||||||
|
|
15
vendor/golang.org/x/tools/internal/imports/imports.go
generated
vendored
15
vendor/golang.org/x/tools/internal/imports/imports.go
generated
vendored
|
@ -14,14 +14,12 @@ import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"go/ast"
|
"go/ast"
|
||||||
"go/build"
|
|
||||||
"go/format"
|
"go/format"
|
||||||
"go/parser"
|
"go/parser"
|
||||||
"go/printer"
|
"go/printer"
|
||||||
"go/token"
|
"go/token"
|
||||||
"io"
|
"io"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"os"
|
|
||||||
"regexp"
|
"regexp"
|
||||||
"strconv"
|
"strconv"
|
||||||
"strings"
|
"strings"
|
||||||
|
@ -146,19 +144,16 @@ func initialize(filename string, src []byte, opt *Options) ([]byte, *Options, er
|
||||||
|
|
||||||
// Set the env if the user has not provided it.
|
// Set the env if the user has not provided it.
|
||||||
if opt.Env == nil {
|
if opt.Env == nil {
|
||||||
opt.Env = &ProcessEnv{
|
opt.Env = &ProcessEnv{}
|
||||||
GOPATH: build.Default.GOPATH,
|
|
||||||
GOROOT: build.Default.GOROOT,
|
|
||||||
GOFLAGS: os.Getenv("GOFLAGS"),
|
|
||||||
GO111MODULE: os.Getenv("GO111MODULE"),
|
|
||||||
GOPROXY: os.Getenv("GOPROXY"),
|
|
||||||
GOSUMDB: os.Getenv("GOSUMDB"),
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
// Set the gocmdRunner if the user has not provided it.
|
// Set the gocmdRunner if the user has not provided it.
|
||||||
if opt.Env.GocmdRunner == nil {
|
if opt.Env.GocmdRunner == nil {
|
||||||
opt.Env.GocmdRunner = &gocommand.Runner{}
|
opt.Env.GocmdRunner = &gocommand.Runner{}
|
||||||
}
|
}
|
||||||
|
if err := opt.Env.init(); err != nil {
|
||||||
|
return nil, nil, err
|
||||||
|
}
|
||||||
|
|
||||||
if src == nil {
|
if src == nil {
|
||||||
b, err := ioutil.ReadFile(filename)
|
b, err := ioutil.ReadFile(filename)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
8
vendor/golang.org/x/tools/internal/imports/mod.go
generated
vendored
8
vendor/golang.org/x/tools/internal/imports/mod.go
generated
vendored
|
@ -79,7 +79,11 @@ func (r *ModuleResolver) init() error {
|
||||||
r.initAllMods()
|
r.initAllMods()
|
||||||
}
|
}
|
||||||
|
|
||||||
r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.GOPATH)[0], "/pkg/mod")
|
if gmc := r.env.Env["GOMODCACHE"]; gmc != "" {
|
||||||
|
r.moduleCacheDir = gmc
|
||||||
|
} else {
|
||||||
|
r.moduleCacheDir = filepath.Join(filepath.SplitList(r.env.gopath())[0], "/pkg/mod")
|
||||||
|
}
|
||||||
|
|
||||||
sort.Slice(r.modsByModPath, func(i, j int) bool {
|
sort.Slice(r.modsByModPath, func(i, j int) bool {
|
||||||
count := func(x int) int {
|
count := func(x int) int {
|
||||||
|
@ -95,7 +99,7 @@ func (r *ModuleResolver) init() error {
|
||||||
})
|
})
|
||||||
|
|
||||||
r.roots = []gopathwalk.Root{
|
r.roots = []gopathwalk.Root{
|
||||||
{filepath.Join(r.env.GOROOT, "/src"), gopathwalk.RootGOROOT},
|
{filepath.Join(r.env.goroot(), "/src"), gopathwalk.RootGOROOT},
|
||||||
}
|
}
|
||||||
if r.main != nil {
|
if r.main != nil {
|
||||||
r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
|
r.roots = append(r.roots, gopathwalk.Root{r.main.Dir, gopathwalk.RootCurrentModule})
|
||||||
|
|
14
vendor/golang.org/x/tools/internal/packagesinternal/packages.go
generated
vendored
14
vendor/golang.org/x/tools/internal/packagesinternal/packages.go
generated
vendored
|
@ -1,14 +0,0 @@
|
||||||
// Package packagesinternal exposes internal-only fields from go/packages.
|
|
||||||
package packagesinternal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"golang.org/x/tools/internal/gocommand"
|
|
||||||
)
|
|
||||||
|
|
||||||
var GetForTest = func(p interface{}) string { return "" }
|
|
||||||
|
|
||||||
var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil }
|
|
||||||
|
|
||||||
var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {}
|
|
||||||
|
|
||||||
var TypecheckCgo int
|
|
28
vendor/golang.org/x/tools/internal/typesinternal/types.go
generated
vendored
28
vendor/golang.org/x/tools/internal/typesinternal/types.go
generated
vendored
|
@ -1,28 +0,0 @@
|
||||||
// Copyright 2020 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package typesinternal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"go/types"
|
|
||||||
"reflect"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
func SetUsesCgo(conf *types.Config) bool {
|
|
||||||
v := reflect.ValueOf(conf).Elem()
|
|
||||||
|
|
||||||
f := v.FieldByName("go115UsesCgo")
|
|
||||||
if !f.IsValid() {
|
|
||||||
f = v.FieldByName("UsesCgo")
|
|
||||||
if !f.IsValid() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
addr := unsafe.Pointer(f.UnsafeAddr())
|
|
||||||
*(*bool)(addr) = true
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
14
vendor/google.golang.org/grpc/.travis.yml
generated
vendored
14
vendor/google.golang.org/grpc/.travis.yml
generated
vendored
|
@ -2,19 +2,19 @@ language: go
|
||||||
|
|
||||||
matrix:
|
matrix:
|
||||||
include:
|
include:
|
||||||
- go: 1.13.x
|
- go: 1.14.x
|
||||||
env: VET=1 GO111MODULE=on
|
env: VET=1 GO111MODULE=on
|
||||||
- go: 1.13.x
|
- go: 1.14.x
|
||||||
env: RACE=1 GO111MODULE=on
|
env: RACE=1 GO111MODULE=on
|
||||||
- go: 1.13.x
|
- go: 1.14.x
|
||||||
env: RUN386=1
|
env: RUN386=1
|
||||||
- go: 1.13.x
|
- go: 1.14.x
|
||||||
env: GRPC_GO_RETRY=on
|
env: GRPC_GO_RETRY=on
|
||||||
- go: 1.13.x
|
- go: 1.14.x
|
||||||
env: TESTEXTRAS=1
|
env: TESTEXTRAS=1
|
||||||
- go: 1.12.x
|
- go: 1.13.x
|
||||||
env: GO111MODULE=on
|
env: GO111MODULE=on
|
||||||
- go: 1.11.x
|
- go: 1.12.x
|
||||||
env: GO111MODULE=on
|
env: GO111MODULE=on
|
||||||
- go: 1.9.x
|
- go: 1.9.x
|
||||||
env: GAE=1
|
env: GAE=1
|
||||||
|
|
5
vendor/google.golang.org/grpc/README.md
generated
vendored
5
vendor/google.golang.org/grpc/README.md
generated
vendored
|
@ -7,7 +7,7 @@
|
||||||
The Go implementation of [gRPC](https://grpc.io/): A high performance, open
|
The Go implementation of [gRPC](https://grpc.io/): A high performance, open
|
||||||
source, general RPC framework that puts mobile and HTTP/2 first. For more
|
source, general RPC framework that puts mobile and HTTP/2 first. For more
|
||||||
information see the [gRPC Quick Start:
|
information see the [gRPC Quick Start:
|
||||||
Go](https://grpc.io/docs/quickstart/go.html) guide.
|
Go](https://grpc.io/docs/languages/go/quickstart/) guide.
|
||||||
|
|
||||||
Installation
|
Installation
|
||||||
------------
|
------------
|
||||||
|
@ -29,7 +29,8 @@ If you are trying to access grpc-go from within China, please see the
|
||||||
|
|
||||||
Prerequisites
|
Prerequisites
|
||||||
-------------
|
-------------
|
||||||
gRPC-Go requires Go 1.9 or later.
|
gRPC-Go officially supports the
|
||||||
|
[three latest major releases of Go](https://golang.org/doc/devel/release.html).
|
||||||
|
|
||||||
Documentation
|
Documentation
|
||||||
-------------
|
-------------
|
||||||
|
|
6
vendor/google.golang.org/grpc/attributes/attributes.go
generated
vendored
6
vendor/google.golang.org/grpc/attributes/attributes.go
generated
vendored
|
@ -50,6 +50,9 @@ func New(kvs ...interface{}) *Attributes {
|
||||||
// times, the last value overwrites all previous values for that key. To
|
// times, the last value overwrites all previous values for that key. To
|
||||||
// remove an existing key, use a nil value.
|
// remove an existing key, use a nil value.
|
||||||
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
|
func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
|
||||||
|
if a == nil {
|
||||||
|
return New(kvs...)
|
||||||
|
}
|
||||||
if len(kvs)%2 != 0 {
|
if len(kvs)%2 != 0 {
|
||||||
panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
|
panic(fmt.Sprintf("attributes.New called with unexpected input: len(kvs) = %v", len(kvs)))
|
||||||
}
|
}
|
||||||
|
@ -66,5 +69,8 @@ func (a *Attributes) WithValues(kvs ...interface{}) *Attributes {
|
||||||
// Value returns the value associated with these attributes for key, or nil if
|
// Value returns the value associated with these attributes for key, or nil if
|
||||||
// no value is associated with key.
|
// no value is associated with key.
|
||||||
func (a *Attributes) Value(key interface{}) interface{} {
|
func (a *Attributes) Value(key interface{}) interface{} {
|
||||||
|
if a == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
return a.m[key]
|
return a.m[key]
|
||||||
}
|
}
|
||||||
|
|
391
vendor/google.golang.org/grpc/balancer.go
generated
vendored
391
vendor/google.golang.org/grpc/balancer.go
generated
vendored
|
@ -1,391 +0,0 @@
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2016 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package grpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/credentials"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
"google.golang.org/grpc/naming"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Address represents a server the client connects to.
|
|
||||||
//
|
|
||||||
// Deprecated: please use package balancer.
|
|
||||||
type Address struct {
|
|
||||||
// Addr is the server address on which a connection will be established.
|
|
||||||
Addr string
|
|
||||||
// Metadata is the information associated with Addr, which may be used
|
|
||||||
// to make load balancing decision.
|
|
||||||
Metadata interface{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// BalancerConfig specifies the configurations for Balancer.
|
|
||||||
//
|
|
||||||
// Deprecated: please use package balancer. May be removed in a future 1.x release.
|
|
||||||
type BalancerConfig struct {
|
|
||||||
// DialCreds is the transport credential the Balancer implementation can
|
|
||||||
// use to dial to a remote load balancer server. The Balancer implementations
|
|
||||||
// can ignore this if it does not need to talk to another party securely.
|
|
||||||
DialCreds credentials.TransportCredentials
|
|
||||||
// Dialer is the custom dialer the Balancer implementation can use to dial
|
|
||||||
// to a remote load balancer server. The Balancer implementations
|
|
||||||
// can ignore this if it doesn't need to talk to remote balancer.
|
|
||||||
Dialer func(context.Context, string) (net.Conn, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// BalancerGetOptions configures a Get call.
|
|
||||||
//
|
|
||||||
// Deprecated: please use package balancer. May be removed in a future 1.x release.
|
|
||||||
type BalancerGetOptions struct {
|
|
||||||
// BlockingWait specifies whether Get should block when there is no
|
|
||||||
// connected address.
|
|
||||||
BlockingWait bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Balancer chooses network addresses for RPCs.
|
|
||||||
//
|
|
||||||
// Deprecated: please use package balancer. May be removed in a future 1.x release.
|
|
||||||
type Balancer interface {
|
|
||||||
// Start does the initialization work to bootstrap a Balancer. For example,
|
|
||||||
// this function may start the name resolution and watch the updates. It will
|
|
||||||
// be called when dialing.
|
|
||||||
Start(target string, config BalancerConfig) error
|
|
||||||
// Up informs the Balancer that gRPC has a connection to the server at
|
|
||||||
// addr. It returns down which is called once the connection to addr gets
|
|
||||||
// lost or closed.
|
|
||||||
// TODO: It is not clear how to construct and take advantage of the meaningful error
|
|
||||||
// parameter for down. Need realistic demands to guide.
|
|
||||||
Up(addr Address) (down func(error))
|
|
||||||
// Get gets the address of a server for the RPC corresponding to ctx.
|
|
||||||
// i) If it returns a connected address, gRPC internals issues the RPC on the
|
|
||||||
// connection to this address;
|
|
||||||
// ii) If it returns an address on which the connection is under construction
|
|
||||||
// (initiated by Notify(...)) but not connected, gRPC internals
|
|
||||||
// * fails RPC if the RPC is fail-fast and connection is in the TransientFailure or
|
|
||||||
// Shutdown state;
|
|
||||||
// or
|
|
||||||
// * issues RPC on the connection otherwise.
|
|
||||||
// iii) If it returns an address on which the connection does not exist, gRPC
|
|
||||||
// internals treats it as an error and will fail the corresponding RPC.
|
|
||||||
//
|
|
||||||
// Therefore, the following is the recommended rule when writing a custom Balancer.
|
|
||||||
// If opts.BlockingWait is true, it should return a connected address or
|
|
||||||
// block if there is no connected address. It should respect the timeout or
|
|
||||||
// cancellation of ctx when blocking. If opts.BlockingWait is false (for fail-fast
|
|
||||||
// RPCs), it should return an address it has notified via Notify(...) immediately
|
|
||||||
// instead of blocking.
|
|
||||||
//
|
|
||||||
// The function returns put which is called once the rpc has completed or failed.
|
|
||||||
// put can collect and report RPC stats to a remote load balancer.
|
|
||||||
//
|
|
||||||
// This function should only return the errors Balancer cannot recover by itself.
|
|
||||||
// gRPC internals will fail the RPC if an error is returned.
|
|
||||||
Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error)
|
|
||||||
// Notify returns a channel that is used by gRPC internals to watch the addresses
|
|
||||||
// gRPC needs to connect. The addresses might be from a name resolver or remote
|
|
||||||
// load balancer. gRPC internals will compare it with the existing connected
|
|
||||||
// addresses. If the address Balancer notified is not in the existing connected
|
|
||||||
// addresses, gRPC starts to connect the address. If an address in the existing
|
|
||||||
// connected addresses is not in the notification list, the corresponding connection
|
|
||||||
// is shutdown gracefully. Otherwise, there are no operations to take. Note that
|
|
||||||
// the Address slice must be the full list of the Addresses which should be connected.
|
|
||||||
// It is NOT delta.
|
|
||||||
Notify() <-chan []Address
|
|
||||||
// Close shuts down the balancer.
|
|
||||||
Close() error
|
|
||||||
}
|
|
||||||
|
|
||||||
// RoundRobin returns a Balancer that selects addresses round-robin. It uses r to watch
|
|
||||||
// the name resolution updates and updates the addresses available correspondingly.
|
|
||||||
//
|
|
||||||
// Deprecated: please use package balancer/roundrobin. May be removed in a future 1.x release.
|
|
||||||
func RoundRobin(r naming.Resolver) Balancer {
|
|
||||||
return &roundRobin{r: r}
|
|
||||||
}
|
|
||||||
|
|
||||||
type addrInfo struct {
|
|
||||||
addr Address
|
|
||||||
connected bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type roundRobin struct {
|
|
||||||
r naming.Resolver
|
|
||||||
w naming.Watcher
|
|
||||||
addrs []*addrInfo // all the addresses the client should potentially connect
|
|
||||||
mu sync.Mutex
|
|
||||||
addrCh chan []Address // the channel to notify gRPC internals the list of addresses the client should connect to.
|
|
||||||
next int // index of the next address to return for Get()
|
|
||||||
waitCh chan struct{} // the channel to block when there is no connected address available
|
|
||||||
done bool // The Balancer is closed.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rr *roundRobin) watchAddrUpdates() error {
|
|
||||||
updates, err := rr.w.Next()
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Warningf("grpc: the naming watcher stops working due to %v.", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rr.mu.Lock()
|
|
||||||
defer rr.mu.Unlock()
|
|
||||||
for _, update := range updates {
|
|
||||||
addr := Address{
|
|
||||||
Addr: update.Addr,
|
|
||||||
Metadata: update.Metadata,
|
|
||||||
}
|
|
||||||
switch update.Op {
|
|
||||||
case naming.Add:
|
|
||||||
var exist bool
|
|
||||||
for _, v := range rr.addrs {
|
|
||||||
if addr == v.addr {
|
|
||||||
exist = true
|
|
||||||
grpclog.Infoln("grpc: The name resolver wanted to add an existing address: ", addr)
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if exist {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
rr.addrs = append(rr.addrs, &addrInfo{addr: addr})
|
|
||||||
case naming.Delete:
|
|
||||||
for i, v := range rr.addrs {
|
|
||||||
if addr == v.addr {
|
|
||||||
copy(rr.addrs[i:], rr.addrs[i+1:])
|
|
||||||
rr.addrs = rr.addrs[:len(rr.addrs)-1]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
grpclog.Errorln("Unknown update.Op ", update.Op)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// Make a copy of rr.addrs and write it onto rr.addrCh so that gRPC internals gets notified.
|
|
||||||
open := make([]Address, len(rr.addrs))
|
|
||||||
for i, v := range rr.addrs {
|
|
||||||
open[i] = v.addr
|
|
||||||
}
|
|
||||||
if rr.done {
|
|
||||||
return ErrClientConnClosing
|
|
||||||
}
|
|
||||||
select {
|
|
||||||
case <-rr.addrCh:
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
rr.addrCh <- open
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rr *roundRobin) Start(target string, config BalancerConfig) error {
|
|
||||||
rr.mu.Lock()
|
|
||||||
defer rr.mu.Unlock()
|
|
||||||
if rr.done {
|
|
||||||
return ErrClientConnClosing
|
|
||||||
}
|
|
||||||
if rr.r == nil {
|
|
||||||
// If there is no name resolver installed, it is not needed to
|
|
||||||
// do name resolution. In this case, target is added into rr.addrs
|
|
||||||
// as the only address available and rr.addrCh stays nil.
|
|
||||||
rr.addrs = append(rr.addrs, &addrInfo{addr: Address{Addr: target}})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
w, err := rr.r.Resolve(target)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
rr.w = w
|
|
||||||
rr.addrCh = make(chan []Address, 1)
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
if err := rr.watchAddrUpdates(); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Up sets the connected state of addr and sends notification if there are pending
|
|
||||||
// Get() calls.
|
|
||||||
func (rr *roundRobin) Up(addr Address) func(error) {
|
|
||||||
rr.mu.Lock()
|
|
||||||
defer rr.mu.Unlock()
|
|
||||||
var cnt int
|
|
||||||
for _, a := range rr.addrs {
|
|
||||||
if a.addr == addr {
|
|
||||||
if a.connected {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
a.connected = true
|
|
||||||
}
|
|
||||||
if a.connected {
|
|
||||||
cnt++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// addr is only one which is connected. Notify the Get() callers who are blocking.
|
|
||||||
if cnt == 1 && rr.waitCh != nil {
|
|
||||||
close(rr.waitCh)
|
|
||||||
rr.waitCh = nil
|
|
||||||
}
|
|
||||||
return func(err error) {
|
|
||||||
rr.down(addr, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// down unsets the connected state of addr.
|
|
||||||
func (rr *roundRobin) down(addr Address, err error) {
|
|
||||||
rr.mu.Lock()
|
|
||||||
defer rr.mu.Unlock()
|
|
||||||
for _, a := range rr.addrs {
|
|
||||||
if addr == a.addr {
|
|
||||||
a.connected = false
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the next addr in the rotation.
|
|
||||||
func (rr *roundRobin) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
|
|
||||||
var ch chan struct{}
|
|
||||||
rr.mu.Lock()
|
|
||||||
if rr.done {
|
|
||||||
rr.mu.Unlock()
|
|
||||||
err = ErrClientConnClosing
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(rr.addrs) > 0 {
|
|
||||||
if rr.next >= len(rr.addrs) {
|
|
||||||
rr.next = 0
|
|
||||||
}
|
|
||||||
next := rr.next
|
|
||||||
for {
|
|
||||||
a := rr.addrs[next]
|
|
||||||
next = (next + 1) % len(rr.addrs)
|
|
||||||
if a.connected {
|
|
||||||
addr = a.addr
|
|
||||||
rr.next = next
|
|
||||||
rr.mu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if next == rr.next {
|
|
||||||
// Has iterated all the possible address but none is connected.
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if !opts.BlockingWait {
|
|
||||||
if len(rr.addrs) == 0 {
|
|
||||||
rr.mu.Unlock()
|
|
||||||
err = status.Errorf(codes.Unavailable, "there is no address available")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Returns the next addr on rr.addrs for failfast RPCs.
|
|
||||||
addr = rr.addrs[rr.next].addr
|
|
||||||
rr.next++
|
|
||||||
rr.mu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Wait on rr.waitCh for non-failfast RPCs.
|
|
||||||
if rr.waitCh == nil {
|
|
||||||
ch = make(chan struct{})
|
|
||||||
rr.waitCh = ch
|
|
||||||
} else {
|
|
||||||
ch = rr.waitCh
|
|
||||||
}
|
|
||||||
rr.mu.Unlock()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
err = ctx.Err()
|
|
||||||
return
|
|
||||||
case <-ch:
|
|
||||||
rr.mu.Lock()
|
|
||||||
if rr.done {
|
|
||||||
rr.mu.Unlock()
|
|
||||||
err = ErrClientConnClosing
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(rr.addrs) > 0 {
|
|
||||||
if rr.next >= len(rr.addrs) {
|
|
||||||
rr.next = 0
|
|
||||||
}
|
|
||||||
next := rr.next
|
|
||||||
for {
|
|
||||||
a := rr.addrs[next]
|
|
||||||
next = (next + 1) % len(rr.addrs)
|
|
||||||
if a.connected {
|
|
||||||
addr = a.addr
|
|
||||||
rr.next = next
|
|
||||||
rr.mu.Unlock()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if next == rr.next {
|
|
||||||
// Has iterated all the possible address but none is connected.
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// The newly added addr got removed by Down() again.
|
|
||||||
if rr.waitCh == nil {
|
|
||||||
ch = make(chan struct{})
|
|
||||||
rr.waitCh = ch
|
|
||||||
} else {
|
|
||||||
ch = rr.waitCh
|
|
||||||
}
|
|
||||||
rr.mu.Unlock()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rr *roundRobin) Notify() <-chan []Address {
|
|
||||||
return rr.addrCh
|
|
||||||
}
|
|
||||||
|
|
||||||
func (rr *roundRobin) Close() error {
|
|
||||||
rr.mu.Lock()
|
|
||||||
defer rr.mu.Unlock()
|
|
||||||
if rr.done {
|
|
||||||
return errBalancerClosed
|
|
||||||
}
|
|
||||||
rr.done = true
|
|
||||||
if rr.w != nil {
|
|
||||||
rr.w.Close()
|
|
||||||
}
|
|
||||||
if rr.waitCh != nil {
|
|
||||||
close(rr.waitCh)
|
|
||||||
rr.waitCh = nil
|
|
||||||
}
|
|
||||||
if rr.addrCh != nil {
|
|
||||||
close(rr.addrCh)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// pickFirst is used to test multi-addresses in one addrConn in which all addresses share the same addrConn.
|
|
||||||
// It is a wrapper around roundRobin balancer. The logic of all methods works fine because balancer.Get()
|
|
||||||
// returns the only address Up by resetTransport().
|
|
||||||
type pickFirst struct {
|
|
||||||
*roundRobin
|
|
||||||
}
|
|
172
vendor/google.golang.org/grpc/balancer/balancer.go
generated
vendored
172
vendor/google.golang.org/grpc/balancer/balancer.go
generated
vendored
|
@ -111,6 +111,9 @@ type NewSubConnOptions struct {
|
||||||
// CredsBundle is the credentials bundle that will be used in the created
|
// CredsBundle is the credentials bundle that will be used in the created
|
||||||
// SubConn. If it's nil, the original creds from grpc DialOptions will be
|
// SubConn. If it's nil, the original creds from grpc DialOptions will be
|
||||||
// used.
|
// used.
|
||||||
|
//
|
||||||
|
// Deprecated: Use the Attributes field in resolver.Address to pass
|
||||||
|
// arbitrary data to the credential handshaker.
|
||||||
CredsBundle credentials.Bundle
|
CredsBundle credentials.Bundle
|
||||||
// HealthCheckEnabled indicates whether health check service should be
|
// HealthCheckEnabled indicates whether health check service should be
|
||||||
// enabled on this SubConn
|
// enabled on this SubConn
|
||||||
|
@ -123,7 +126,7 @@ type State struct {
|
||||||
// determine the state of the ClientConn.
|
// determine the state of the ClientConn.
|
||||||
ConnectivityState connectivity.State
|
ConnectivityState connectivity.State
|
||||||
// Picker is used to choose connections (SubConns) for RPCs.
|
// Picker is used to choose connections (SubConns) for RPCs.
|
||||||
Picker V2Picker
|
Picker Picker
|
||||||
}
|
}
|
||||||
|
|
||||||
// ClientConn represents a gRPC ClientConn.
|
// ClientConn represents a gRPC ClientConn.
|
||||||
|
@ -141,20 +144,11 @@ type ClientConn interface {
|
||||||
// The SubConn will be shutdown.
|
// The SubConn will be shutdown.
|
||||||
RemoveSubConn(SubConn)
|
RemoveSubConn(SubConn)
|
||||||
|
|
||||||
// UpdateBalancerState is called by balancer to notify gRPC that some internal
|
|
||||||
// state in balancer has changed.
|
|
||||||
//
|
|
||||||
// gRPC will update the connectivity state of the ClientConn, and will call pick
|
|
||||||
// on the new picker to pick new SubConn.
|
|
||||||
//
|
|
||||||
// Deprecated: use UpdateState instead
|
|
||||||
UpdateBalancerState(s connectivity.State, p Picker)
|
|
||||||
|
|
||||||
// UpdateState notifies gRPC that the balancer's internal state has
|
// UpdateState notifies gRPC that the balancer's internal state has
|
||||||
// changed.
|
// changed.
|
||||||
//
|
//
|
||||||
// gRPC will update the connectivity state of the ClientConn, and will call pick
|
// gRPC will update the connectivity state of the ClientConn, and will call
|
||||||
// on the new picker to pick new SubConns.
|
// Pick on the new Picker to pick new SubConns.
|
||||||
UpdateState(State)
|
UpdateState(State)
|
||||||
|
|
||||||
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
|
// ResolveNow is called by balancer to notify gRPC to do a name resolving.
|
||||||
|
@ -232,56 +226,17 @@ type DoneInfo struct {
|
||||||
|
|
||||||
var (
|
var (
|
||||||
// ErrNoSubConnAvailable indicates no SubConn is available for pick().
|
// ErrNoSubConnAvailable indicates no SubConn is available for pick().
|
||||||
// gRPC will block the RPC until a new picker is available via UpdateBalancerState().
|
// gRPC will block the RPC until a new picker is available via UpdateState().
|
||||||
ErrNoSubConnAvailable = errors.New("no SubConn is available")
|
ErrNoSubConnAvailable = errors.New("no SubConn is available")
|
||||||
// ErrTransientFailure indicates all SubConns are in TransientFailure.
|
// ErrTransientFailure indicates all SubConns are in TransientFailure.
|
||||||
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
|
// WaitForReady RPCs will block, non-WaitForReady RPCs will fail.
|
||||||
ErrTransientFailure = TransientFailureError(errors.New("all SubConns are in TransientFailure"))
|
//
|
||||||
|
// Deprecated: return an appropriate error based on the last resolution or
|
||||||
|
// connection attempt instead. The behavior is the same for any non-gRPC
|
||||||
|
// status error.
|
||||||
|
ErrTransientFailure = errors.New("all SubConns are in TransientFailure")
|
||||||
)
|
)
|
||||||
|
|
||||||
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
|
||||||
// Balancer is expected to generate a new picker from its snapshot every time its
|
|
||||||
// internal state has changed.
|
|
||||||
//
|
|
||||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
|
||||||
//
|
|
||||||
// Deprecated: use V2Picker instead
|
|
||||||
type Picker interface {
|
|
||||||
// Pick returns the SubConn to be used to send the RPC.
|
|
||||||
// The returned SubConn must be one returned by NewSubConn().
|
|
||||||
//
|
|
||||||
// This functions is expected to return:
|
|
||||||
// - a SubConn that is known to be READY;
|
|
||||||
// - ErrNoSubConnAvailable if no SubConn is available, but progress is being
|
|
||||||
// made (for example, some SubConn is in CONNECTING mode);
|
|
||||||
// - other errors if no active connecting is happening (for example, all SubConn
|
|
||||||
// are in TRANSIENT_FAILURE mode).
|
|
||||||
//
|
|
||||||
// If a SubConn is returned:
|
|
||||||
// - If it is READY, gRPC will send the RPC on it;
|
|
||||||
// - If it is not ready, or becomes not ready after it's returned, gRPC will
|
|
||||||
// block until UpdateBalancerState() is called and will call pick on the
|
|
||||||
// new picker. The done function returned from Pick(), if not nil, will be
|
|
||||||
// called with nil error, no bytes sent and no bytes received.
|
|
||||||
//
|
|
||||||
// If the returned error is not nil:
|
|
||||||
// - If the error is ErrNoSubConnAvailable, gRPC will block until UpdateBalancerState()
|
|
||||||
// - If the error is ErrTransientFailure or implements IsTransientFailure()
|
|
||||||
// bool, returning true:
|
|
||||||
// - If the RPC is wait-for-ready, gRPC will block until UpdateBalancerState()
|
|
||||||
// is called to pick again;
|
|
||||||
// - Otherwise, RPC will fail with unavailable error.
|
|
||||||
// - Else (error is other non-nil error):
|
|
||||||
// - The RPC will fail with the error's status code, or Unknown if it is
|
|
||||||
// not a status error.
|
|
||||||
//
|
|
||||||
// The returned done() function will be called once the rpc has finished,
|
|
||||||
// with the final status of that RPC. If the SubConn returned is not a
|
|
||||||
// valid SubConn type, done may not be called. done may be nil if balancer
|
|
||||||
// doesn't care about the RPC status.
|
|
||||||
Pick(ctx context.Context, info PickInfo) (conn SubConn, done func(DoneInfo), err error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PickResult contains information related to a connection chosen for an RPC.
|
// PickResult contains information related to a connection chosen for an RPC.
|
||||||
type PickResult struct {
|
type PickResult struct {
|
||||||
// SubConn is the connection to use for this pick, if its state is Ready.
|
// SubConn is the connection to use for this pick, if its state is Ready.
|
||||||
|
@ -297,24 +252,19 @@ type PickResult struct {
|
||||||
Done func(DoneInfo)
|
Done func(DoneInfo)
|
||||||
}
|
}
|
||||||
|
|
||||||
type transientFailureError struct {
|
// TransientFailureError returns e. It exists for backward compatibility and
|
||||||
error
|
// will be deleted soon.
|
||||||
}
|
//
|
||||||
|
// Deprecated: no longer necessary, picker errors are treated this way by
|
||||||
|
// default.
|
||||||
|
func TransientFailureError(e error) error { return e }
|
||||||
|
|
||||||
func (e *transientFailureError) IsTransientFailure() bool { return true }
|
// Picker is used by gRPC to pick a SubConn to send an RPC.
|
||||||
|
|
||||||
// TransientFailureError wraps err in an error implementing
|
|
||||||
// IsTransientFailure() bool, returning true.
|
|
||||||
func TransientFailureError(err error) error {
|
|
||||||
return &transientFailureError{error: err}
|
|
||||||
}
|
|
||||||
|
|
||||||
// V2Picker is used by gRPC to pick a SubConn to send an RPC.
|
|
||||||
// Balancer is expected to generate a new picker from its snapshot every time its
|
// Balancer is expected to generate a new picker from its snapshot every time its
|
||||||
// internal state has changed.
|
// internal state has changed.
|
||||||
//
|
//
|
||||||
// The pickers used by gRPC can be updated by ClientConn.UpdateBalancerState().
|
// The pickers used by gRPC can be updated by ClientConn.UpdateState().
|
||||||
type V2Picker interface {
|
type Picker interface {
|
||||||
// Pick returns the connection to use for this RPC and related information.
|
// Pick returns the connection to use for this RPC and related information.
|
||||||
//
|
//
|
||||||
// Pick should not block. If the balancer needs to do I/O or any blocking
|
// Pick should not block. If the balancer needs to do I/O or any blocking
|
||||||
|
@ -327,14 +277,13 @@ type V2Picker interface {
|
||||||
// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
|
// - If the error is ErrNoSubConnAvailable, gRPC will block until a new
|
||||||
// Picker is provided by the balancer (using ClientConn.UpdateState).
|
// Picker is provided by the balancer (using ClientConn.UpdateState).
|
||||||
//
|
//
|
||||||
// - If the error implements IsTransientFailure() bool, returning true,
|
// - If the error is a status error (implemented by the grpc/status
|
||||||
// wait for ready RPCs will wait, but non-wait for ready RPCs will be
|
// package), gRPC will terminate the RPC with the code and message
|
||||||
// terminated with this error's Error() string and status code
|
// provided.
|
||||||
// Unavailable.
|
|
||||||
//
|
//
|
||||||
// - Any other errors terminate all RPCs with the code and message
|
// - For all other errors, wait for ready RPCs will wait, but non-wait for
|
||||||
// provided. If the error is not a status error, it will be converted by
|
// ready RPCs will be terminated with this error's Error() string and
|
||||||
// gRPC to a status error with code Unknown.
|
// status code Unavailable.
|
||||||
Pick(info PickInfo) (PickResult, error)
|
Pick(info PickInfo) (PickResult, error)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -343,34 +292,36 @@ type V2Picker interface {
|
||||||
//
|
//
|
||||||
// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
|
// It also generates and updates the Picker used by gRPC to pick SubConns for RPCs.
|
||||||
//
|
//
|
||||||
// HandleSubConnectionStateChange, HandleResolvedAddrs and Close are guaranteed
|
// UpdateClientConnState, ResolverError, UpdateSubConnState, and Close are
|
||||||
// to be called synchronously from the same goroutine.
|
// guaranteed to be called synchronously from the same goroutine. There's no
|
||||||
// There's no guarantee on picker.Pick, it may be called anytime.
|
// guarantee on picker.Pick, it may be called anytime.
|
||||||
type Balancer interface {
|
type Balancer interface {
|
||||||
// HandleSubConnStateChange is called by gRPC when the connectivity state
|
// UpdateClientConnState is called by gRPC when the state of the ClientConn
|
||||||
// of sc has changed.
|
// changes. If the error returned is ErrBadResolverState, the ClientConn
|
||||||
// Balancer is expected to aggregate all the state of SubConn and report
|
// will begin calling ResolveNow on the active name resolver with
|
||||||
// that back to gRPC.
|
// exponential backoff until a subsequent call to UpdateClientConnState
|
||||||
// Balancer should also generate and update Pickers when its internal state has
|
// returns a nil error. Any other errors are currently ignored.
|
||||||
// been changed by the new state.
|
UpdateClientConnState(ClientConnState) error
|
||||||
//
|
// ResolverError is called by gRPC when the name resolver reports an error.
|
||||||
// Deprecated: if V2Balancer is implemented by the Balancer,
|
ResolverError(error)
|
||||||
// UpdateSubConnState will be called instead.
|
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
||||||
HandleSubConnStateChange(sc SubConn, state connectivity.State)
|
// changes.
|
||||||
// HandleResolvedAddrs is called by gRPC to send updated resolved addresses to
|
UpdateSubConnState(SubConn, SubConnState)
|
||||||
// balancers.
|
|
||||||
// Balancer can create new SubConn or remove SubConn with the addresses.
|
|
||||||
// An empty address slice and a non-nil error will be passed if the resolver returns
|
|
||||||
// non-nil error to gRPC.
|
|
||||||
//
|
|
||||||
// Deprecated: if V2Balancer is implemented by the Balancer,
|
|
||||||
// UpdateClientConnState will be called instead.
|
|
||||||
HandleResolvedAddrs([]resolver.Address, error)
|
|
||||||
// Close closes the balancer. The balancer is not required to call
|
// Close closes the balancer. The balancer is not required to call
|
||||||
// ClientConn.RemoveSubConn for its existing SubConns.
|
// ClientConn.RemoveSubConn for its existing SubConns.
|
||||||
Close()
|
Close()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// V2Balancer is temporarily defined for backward compatibility reasons.
|
||||||
|
//
|
||||||
|
// Deprecated: use Balancer directly instead.
|
||||||
|
type V2Balancer = Balancer
|
||||||
|
|
||||||
|
// V2Picker is temporarily defined for backward compatibility reasons.
|
||||||
|
//
|
||||||
|
// Deprecated: use Picker directly instead.
|
||||||
|
type V2Picker = Picker
|
||||||
|
|
||||||
// SubConnState describes the state of a SubConn.
|
// SubConnState describes the state of a SubConn.
|
||||||
type SubConnState struct {
|
type SubConnState struct {
|
||||||
// ConnectivityState is the connectivity state of the SubConn.
|
// ConnectivityState is the connectivity state of the SubConn.
|
||||||
|
@ -393,27 +344,6 @@ type ClientConnState struct {
|
||||||
// problem with the provided name resolver data.
|
// problem with the provided name resolver data.
|
||||||
var ErrBadResolverState = errors.New("bad resolver state")
|
var ErrBadResolverState = errors.New("bad resolver state")
|
||||||
|
|
||||||
// V2Balancer is defined for documentation purposes. If a Balancer also
|
|
||||||
// implements V2Balancer, its UpdateClientConnState method will be called
|
|
||||||
// instead of HandleResolvedAddrs and its UpdateSubConnState will be called
|
|
||||||
// instead of HandleSubConnStateChange.
|
|
||||||
type V2Balancer interface {
|
|
||||||
// UpdateClientConnState is called by gRPC when the state of the ClientConn
|
|
||||||
// changes. If the error returned is ErrBadResolverState, the ClientConn
|
|
||||||
// will begin calling ResolveNow on the active name resolver with
|
|
||||||
// exponential backoff until a subsequent call to UpdateClientConnState
|
|
||||||
// returns a nil error. Any other errors are currently ignored.
|
|
||||||
UpdateClientConnState(ClientConnState) error
|
|
||||||
// ResolverError is called by gRPC when the name resolver reports an error.
|
|
||||||
ResolverError(error)
|
|
||||||
// UpdateSubConnState is called by gRPC when the state of a SubConn
|
|
||||||
// changes.
|
|
||||||
UpdateSubConnState(SubConn, SubConnState)
|
|
||||||
// Close closes the balancer. The balancer is not required to call
|
|
||||||
// ClientConn.RemoveSubConn for its existing SubConns.
|
|
||||||
Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
|
// ConnectivityStateEvaluator takes the connectivity states of multiple SubConns
|
||||||
// and returns one aggregated connectivity state.
|
// and returns one aggregated connectivity state.
|
||||||
//
|
//
|
||||||
|
|
77
vendor/google.golang.org/grpc/balancer/base/balancer.go
generated
vendored
77
vendor/google.golang.org/grpc/balancer/base/balancer.go
generated
vendored
|
@ -19,7 +19,6 @@
|
||||||
package base
|
package base
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
|
||||||
|
@ -32,7 +31,6 @@ import (
|
||||||
type baseBuilder struct {
|
type baseBuilder struct {
|
||||||
name string
|
name string
|
||||||
pickerBuilder PickerBuilder
|
pickerBuilder PickerBuilder
|
||||||
v2PickerBuilder V2PickerBuilder
|
|
||||||
config Config
|
config Config
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -40,7 +38,6 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
|
||||||
bal := &baseBalancer{
|
bal := &baseBalancer{
|
||||||
cc: cc,
|
cc: cc,
|
||||||
pickerBuilder: bb.pickerBuilder,
|
pickerBuilder: bb.pickerBuilder,
|
||||||
v2PickerBuilder: bb.v2PickerBuilder,
|
|
||||||
|
|
||||||
subConns: make(map[resolver.Address]balancer.SubConn),
|
subConns: make(map[resolver.Address]balancer.SubConn),
|
||||||
scStates: make(map[balancer.SubConn]connectivity.State),
|
scStates: make(map[balancer.SubConn]connectivity.State),
|
||||||
|
@ -50,11 +47,7 @@ func (bb *baseBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions)
|
||||||
// Initialize picker to a picker that always returns
|
// Initialize picker to a picker that always returns
|
||||||
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
// ErrNoSubConnAvailable, because when state of a SubConn changes, we
|
||||||
// may call UpdateState with this picker.
|
// may call UpdateState with this picker.
|
||||||
if bb.pickerBuilder != nil {
|
|
||||||
bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
|
bal.picker = NewErrPicker(balancer.ErrNoSubConnAvailable)
|
||||||
} else {
|
|
||||||
bal.v2Picker = NewErrPickerV2(balancer.ErrNoSubConnAvailable)
|
|
||||||
}
|
|
||||||
return bal
|
return bal
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -62,12 +55,9 @@ func (bb *baseBuilder) Name() string {
|
||||||
return bb.name
|
return bb.name
|
||||||
}
|
}
|
||||||
|
|
||||||
var _ balancer.V2Balancer = (*baseBalancer)(nil) // Assert that we implement V2Balancer
|
|
||||||
|
|
||||||
type baseBalancer struct {
|
type baseBalancer struct {
|
||||||
cc balancer.ClientConn
|
cc balancer.ClientConn
|
||||||
pickerBuilder PickerBuilder
|
pickerBuilder PickerBuilder
|
||||||
v2PickerBuilder V2PickerBuilder
|
|
||||||
|
|
||||||
csEvltr *balancer.ConnectivityStateEvaluator
|
csEvltr *balancer.ConnectivityStateEvaluator
|
||||||
state connectivity.State
|
state connectivity.State
|
||||||
|
@ -75,40 +65,31 @@ type baseBalancer struct {
|
||||||
subConns map[resolver.Address]balancer.SubConn
|
subConns map[resolver.Address]balancer.SubConn
|
||||||
scStates map[balancer.SubConn]connectivity.State
|
scStates map[balancer.SubConn]connectivity.State
|
||||||
picker balancer.Picker
|
picker balancer.Picker
|
||||||
v2Picker balancer.V2Picker
|
|
||||||
config Config
|
config Config
|
||||||
|
|
||||||
resolverErr error // the last error reported by the resolver; cleared on successful resolution
|
resolverErr error // the last error reported by the resolver; cleared on successful resolution
|
||||||
connErr error // the last connection error; cleared upon leaving TransientFailure
|
connErr error // the last connection error; cleared upon leaving TransientFailure
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *baseBalancer) ResolverError(err error) {
|
func (b *baseBalancer) ResolverError(err error) {
|
||||||
b.resolverErr = err
|
b.resolverErr = err
|
||||||
if len(b.subConns) == 0 {
|
if len(b.subConns) == 0 {
|
||||||
b.state = connectivity.TransientFailure
|
b.state = connectivity.TransientFailure
|
||||||
}
|
}
|
||||||
|
|
||||||
if b.state != connectivity.TransientFailure {
|
if b.state != connectivity.TransientFailure {
|
||||||
// The picker will not change since the balancer does not currently
|
// The picker will not change since the balancer does not currently
|
||||||
// report an error.
|
// report an error.
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
b.regeneratePicker()
|
b.regeneratePicker()
|
||||||
if b.picker != nil {
|
|
||||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
|
||||||
} else {
|
|
||||||
b.cc.UpdateState(balancer.State{
|
b.cc.UpdateState(balancer.State{
|
||||||
ConnectivityState: b.state,
|
ConnectivityState: b.state,
|
||||||
Picker: b.v2Picker,
|
Picker: b.picker,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||||
// TODO: handle s.ResolverState.Err (log if not nil) once implemented.
|
|
||||||
// TODO: handle s.ResolverState.ServiceConfig?
|
// TODO: handle s.ResolverState.ServiceConfig?
|
||||||
if grpclog.V(2) {
|
if grpclog.V(2) {
|
||||||
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
|
grpclog.Infoln("base.baseBalancer: got new ClientConn state: ", s)
|
||||||
|
@ -137,7 +118,7 @@ func (b *baseBalancer) UpdateClientConnState(s balancer.ClientConnState) error {
|
||||||
b.cc.RemoveSubConn(sc)
|
b.cc.RemoveSubConn(sc)
|
||||||
delete(b.subConns, a)
|
delete(b.subConns, a)
|
||||||
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
// Keep the state of this sc in b.scStates until sc's state becomes Shutdown.
|
||||||
// The entry will be deleted in HandleSubConnStateChange.
|
// The entry will be deleted in UpdateSubConnState.
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// If resolver state contains no addresses, return an error so ClientConn
|
// If resolver state contains no addresses, return an error so ClientConn
|
||||||
|
@ -171,24 +152,9 @@ func (b *baseBalancer) mergeErrors() error {
|
||||||
// - built by the pickerBuilder with all READY SubConns otherwise.
|
// - built by the pickerBuilder with all READY SubConns otherwise.
|
||||||
func (b *baseBalancer) regeneratePicker() {
|
func (b *baseBalancer) regeneratePicker() {
|
||||||
if b.state == connectivity.TransientFailure {
|
if b.state == connectivity.TransientFailure {
|
||||||
if b.pickerBuilder != nil {
|
b.picker = NewErrPicker(b.mergeErrors())
|
||||||
b.picker = NewErrPicker(balancer.ErrTransientFailure)
|
|
||||||
} else {
|
|
||||||
b.v2Picker = NewErrPickerV2(balancer.TransientFailureError(b.mergeErrors()))
|
|
||||||
}
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if b.pickerBuilder != nil {
|
|
||||||
readySCs := make(map[resolver.Address]balancer.SubConn)
|
|
||||||
|
|
||||||
// Filter out all ready SCs from full subConn map.
|
|
||||||
for addr, sc := range b.subConns {
|
|
||||||
if st, ok := b.scStates[sc]; ok && st == connectivity.Ready {
|
|
||||||
readySCs[addr] = sc
|
|
||||||
}
|
|
||||||
}
|
|
||||||
b.picker = b.pickerBuilder.Build(readySCs)
|
|
||||||
} else {
|
|
||||||
readySCs := make(map[balancer.SubConn]SubConnInfo)
|
readySCs := make(map[balancer.SubConn]SubConnInfo)
|
||||||
|
|
||||||
// Filter out all ready SCs from full subConn map.
|
// Filter out all ready SCs from full subConn map.
|
||||||
|
@ -197,12 +163,7 @@ func (b *baseBalancer) regeneratePicker() {
|
||||||
readySCs[sc] = SubConnInfo{Address: addr}
|
readySCs[sc] = SubConnInfo{Address: addr}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
b.v2Picker = b.v2PickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
b.picker = b.pickerBuilder.Build(PickerBuildInfo{ReadySCs: readySCs})
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (b *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
|
||||||
panic("not implemented")
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.SubConnState) {
|
||||||
|
@ -247,11 +208,7 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||||
b.regeneratePicker()
|
b.regeneratePicker()
|
||||||
}
|
}
|
||||||
|
|
||||||
if b.picker != nil {
|
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.picker})
|
||||||
b.cc.UpdateBalancerState(b.state, b.picker)
|
|
||||||
} else {
|
|
||||||
b.cc.UpdateState(balancer.State{ConnectivityState: b.state, Picker: b.v2Picker})
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close is a nop because base balancer doesn't have internal state to clean up,
|
// Close is a nop because base balancer doesn't have internal state to clean up,
|
||||||
|
@ -259,28 +216,20 @@ func (b *baseBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Su
|
||||||
func (b *baseBalancer) Close() {
|
func (b *baseBalancer) Close() {
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewErrPicker returns a picker that always returns err on Pick().
|
// NewErrPicker returns a Picker that always returns err on Pick().
|
||||||
func NewErrPicker(err error) balancer.Picker {
|
func NewErrPicker(err error) balancer.Picker {
|
||||||
return &errPicker{err: err}
|
return &errPicker{err: err}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// NewErrPickerV2 is temporarily defined for backward compatibility reasons.
|
||||||
|
//
|
||||||
|
// Deprecated: use NewErrPicker instead.
|
||||||
|
var NewErrPickerV2 = NewErrPicker
|
||||||
|
|
||||||
type errPicker struct {
|
type errPicker struct {
|
||||||
err error // Pick() always returns this err.
|
err error // Pick() always returns this err.
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *errPicker) Pick(context.Context, balancer.PickInfo) (balancer.SubConn, func(balancer.DoneInfo), error) {
|
func (p *errPicker) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
||||||
return nil, nil, p.err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewErrPickerV2 returns a V2Picker that always returns err on Pick().
|
|
||||||
func NewErrPickerV2(err error) balancer.V2Picker {
|
|
||||||
return &errPickerV2{err: err}
|
|
||||||
}
|
|
||||||
|
|
||||||
type errPickerV2 struct {
|
|
||||||
err error // Pick() always returns this err.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *errPickerV2) Pick(info balancer.PickInfo) (balancer.PickResult, error) {
|
|
||||||
return balancer.PickResult{}, p.err
|
return balancer.PickResult{}, p.err
|
||||||
}
|
}
|
||||||
|
|
37
vendor/google.golang.org/grpc/balancer/base/base.go
generated
vendored
37
vendor/google.golang.org/grpc/balancer/base/base.go
generated
vendored
|
@ -37,15 +37,8 @@ import (
|
||||||
|
|
||||||
// PickerBuilder creates balancer.Picker.
|
// PickerBuilder creates balancer.Picker.
|
||||||
type PickerBuilder interface {
|
type PickerBuilder interface {
|
||||||
// Build takes a slice of ready SubConns, and returns a picker that will be
|
|
||||||
// used by gRPC to pick a SubConn.
|
|
||||||
Build(readySCs map[resolver.Address]balancer.SubConn) balancer.Picker
|
|
||||||
}
|
|
||||||
|
|
||||||
// V2PickerBuilder creates balancer.V2Picker.
|
|
||||||
type V2PickerBuilder interface {
|
|
||||||
// Build returns a picker that will be used by gRPC to pick a SubConn.
|
// Build returns a picker that will be used by gRPC to pick a SubConn.
|
||||||
Build(info PickerBuildInfo) balancer.V2Picker
|
Build(info PickerBuildInfo) balancer.Picker
|
||||||
}
|
}
|
||||||
|
|
||||||
// PickerBuildInfo contains information needed by the picker builder to
|
// PickerBuildInfo contains information needed by the picker builder to
|
||||||
|
@ -62,20 +55,14 @@ type SubConnInfo struct {
|
||||||
Address resolver.Address // the address used to create this SubConn
|
Address resolver.Address // the address used to create this SubConn
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBalancerBuilder returns a balancer builder. The balancers
|
|
||||||
// built by this builder will use the picker builder to build pickers.
|
|
||||||
func NewBalancerBuilder(name string, pb PickerBuilder) balancer.Builder {
|
|
||||||
return NewBalancerBuilderWithConfig(name, pb, Config{})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Config contains the config info about the base balancer builder.
|
// Config contains the config info about the base balancer builder.
|
||||||
type Config struct {
|
type Config struct {
|
||||||
// HealthCheck indicates whether health checking should be enabled for this specific balancer.
|
// HealthCheck indicates whether health checking should be enabled for this specific balancer.
|
||||||
HealthCheck bool
|
HealthCheck bool
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBalancerBuilderWithConfig returns a base balancer builder configured by the provided config.
|
// NewBalancerBuilder returns a base balancer builder configured by the provided config.
|
||||||
func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config) balancer.Builder {
|
func NewBalancerBuilder(name string, pb PickerBuilder, config Config) balancer.Builder {
|
||||||
return &baseBuilder{
|
return &baseBuilder{
|
||||||
name: name,
|
name: name,
|
||||||
pickerBuilder: pb,
|
pickerBuilder: pb,
|
||||||
|
@ -83,11 +70,13 @@ func NewBalancerBuilderWithConfig(name string, pb PickerBuilder, config Config)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewBalancerBuilderV2 returns a base balancer builder configured by the provided config.
|
// NewBalancerBuilderV2 is temporarily defined for backward compatibility
|
||||||
func NewBalancerBuilderV2(name string, pb V2PickerBuilder, config Config) balancer.Builder {
|
// reasons.
|
||||||
return &baseBuilder{
|
//
|
||||||
name: name,
|
// Deprecated: use NewBalancerBuilder instead.
|
||||||
v2PickerBuilder: pb,
|
var NewBalancerBuilderV2 = NewBalancerBuilder
|
||||||
config: config,
|
|
||||||
}
|
// V2PickerBuilder is temporarily defined for backward compatibility reasons.
|
||||||
}
|
//
|
||||||
|
// Deprecated: use PickerBuilder instead.
|
||||||
|
type V2PickerBuilder = PickerBuilder
|
||||||
|
|
51
vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
generated
vendored
Normal file
51
vendor/google.golang.org/grpc/balancer/grpclb/state/state.go
generated
vendored
Normal file
|
@ -0,0 +1,51 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2020 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package state declares grpclb types to be set by resolvers wishing to pass
|
||||||
|
// information to grpclb via resolver.State Attributes.
|
||||||
|
package state
|
||||||
|
|
||||||
|
import (
|
||||||
|
"google.golang.org/grpc/resolver"
|
||||||
|
)
|
||||||
|
|
||||||
|
// keyType is the key to use for storing State in Attributes.
|
||||||
|
type keyType string
|
||||||
|
|
||||||
|
const key = keyType("grpc.grpclb.state")
|
||||||
|
|
||||||
|
// State contains gRPCLB-relevant data passed from the name resolver.
|
||||||
|
type State struct {
|
||||||
|
// BalancerAddresses contains the remote load balancer address(es). If
|
||||||
|
// set, overrides any resolver-provided addresses with Type of GRPCLB.
|
||||||
|
BalancerAddresses []resolver.Address
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set returns a copy of the provided state with attributes containing s. s's
|
||||||
|
// data should not be mutated after calling Set.
|
||||||
|
func Set(state resolver.State, s *State) resolver.State {
|
||||||
|
state.Attributes = state.Attributes.WithValues(key, s)
|
||||||
|
return state
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get returns the grpclb State in the resolver.State, or nil if not present.
|
||||||
|
// The returned data should not be mutated.
|
||||||
|
func Get(state resolver.State) *State {
|
||||||
|
s, _ := state.Attributes.Value(key).(*State)
|
||||||
|
return s
|
||||||
|
}
|
6
vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
generated
vendored
6
vendor/google.golang.org/grpc/balancer/roundrobin/roundrobin.go
generated
vendored
|
@ -35,7 +35,7 @@ const Name = "round_robin"
|
||||||
|
|
||||||
// newBuilder creates a new roundrobin balancer builder.
|
// newBuilder creates a new roundrobin balancer builder.
|
||||||
func newBuilder() balancer.Builder {
|
func newBuilder() balancer.Builder {
|
||||||
return base.NewBalancerBuilderV2(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
|
return base.NewBalancerBuilder(Name, &rrPickerBuilder{}, base.Config{HealthCheck: true})
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
@ -44,10 +44,10 @@ func init() {
|
||||||
|
|
||||||
type rrPickerBuilder struct{}
|
type rrPickerBuilder struct{}
|
||||||
|
|
||||||
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.V2Picker {
|
func (*rrPickerBuilder) Build(info base.PickerBuildInfo) balancer.Picker {
|
||||||
grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
|
grpclog.Infof("roundrobinPicker: newPicker called with info: %v", info)
|
||||||
if len(info.ReadySCs) == 0 {
|
if len(info.ReadySCs) == 0 {
|
||||||
return base.NewErrPickerV2(balancer.ErrNoSubConnAvailable)
|
return base.NewErrPicker(balancer.ErrNoSubConnAvailable)
|
||||||
}
|
}
|
||||||
var scs []balancer.SubConn
|
var scs []balancer.SubConn
|
||||||
for sc := range info.ReadySCs {
|
for sc := range info.ReadySCs {
|
||||||
|
|
33
vendor/google.golang.org/grpc/balancer_conn_wrappers.go
generated
vendored
33
vendor/google.golang.org/grpc/balancer_conn_wrappers.go
generated
vendored
|
@ -74,11 +74,7 @@ func (ccb *ccBalancerWrapper) watcher() {
|
||||||
}
|
}
|
||||||
ccb.balancerMu.Lock()
|
ccb.balancerMu.Lock()
|
||||||
su := t.(*scStateUpdate)
|
su := t.(*scStateUpdate)
|
||||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
ccb.balancer.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
|
||||||
ub.UpdateSubConnState(su.sc, balancer.SubConnState{ConnectivityState: su.state, ConnectionError: su.err})
|
|
||||||
} else {
|
|
||||||
ccb.balancer.HandleSubConnStateChange(su.sc, su.state)
|
|
||||||
}
|
|
||||||
ccb.balancerMu.Unlock()
|
ccb.balancerMu.Unlock()
|
||||||
case <-ccb.done.Done():
|
case <-ccb.done.Done():
|
||||||
}
|
}
|
||||||
|
@ -123,20 +119,14 @@ func (ccb *ccBalancerWrapper) handleSubConnStateChange(sc balancer.SubConn, s co
|
||||||
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
func (ccb *ccBalancerWrapper) updateClientConnState(ccs *balancer.ClientConnState) error {
|
||||||
ccb.balancerMu.Lock()
|
ccb.balancerMu.Lock()
|
||||||
defer ccb.balancerMu.Unlock()
|
defer ccb.balancerMu.Unlock()
|
||||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
return ccb.balancer.UpdateClientConnState(*ccs)
|
||||||
return ub.UpdateClientConnState(*ccs)
|
|
||||||
}
|
|
||||||
ccb.balancer.HandleResolvedAddrs(ccs.ResolverState.Addresses, nil)
|
|
||||||
return nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ccb *ccBalancerWrapper) resolverError(err error) {
|
func (ccb *ccBalancerWrapper) resolverError(err error) {
|
||||||
if ub, ok := ccb.balancer.(balancer.V2Balancer); ok {
|
|
||||||
ccb.balancerMu.Lock()
|
ccb.balancerMu.Lock()
|
||||||
ub.ResolverError(err)
|
ccb.balancer.ResolverError(err)
|
||||||
ccb.balancerMu.Unlock()
|
ccb.balancerMu.Unlock()
|
||||||
}
|
}
|
||||||
}
|
|
||||||
|
|
||||||
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
func (ccb *ccBalancerWrapper) NewSubConn(addrs []resolver.Address, opts balancer.NewSubConnOptions) (balancer.SubConn, error) {
|
||||||
if len(addrs) <= 0 {
|
if len(addrs) <= 0 {
|
||||||
|
@ -173,21 +163,6 @@ func (ccb *ccBalancerWrapper) RemoveSubConn(sc balancer.SubConn) {
|
||||||
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
ccb.cc.removeAddrConn(acbw.getAddrConn(), errConnDrain)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (ccb *ccBalancerWrapper) UpdateBalancerState(s connectivity.State, p balancer.Picker) {
|
|
||||||
ccb.mu.Lock()
|
|
||||||
defer ccb.mu.Unlock()
|
|
||||||
if ccb.subConns == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// Update picker before updating state. Even though the ordering here does
|
|
||||||
// not matter, it can lead to multiple calls of Pick in the common start-up
|
|
||||||
// case where we wait for ready and then perform an RPC. If the picker is
|
|
||||||
// updated later, we could call the "connecting" picker when the state is
|
|
||||||
// updated, and then call the "ready" picker after the picker gets updated.
|
|
||||||
ccb.cc.blockingpicker.updatePicker(p)
|
|
||||||
ccb.cc.csMgr.updateState(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||||
ccb.mu.Lock()
|
ccb.mu.Lock()
|
||||||
defer ccb.mu.Unlock()
|
defer ccb.mu.Unlock()
|
||||||
|
@ -199,7 +174,7 @@ func (ccb *ccBalancerWrapper) UpdateState(s balancer.State) {
|
||||||
// case where we wait for ready and then perform an RPC. If the picker is
|
// case where we wait for ready and then perform an RPC. If the picker is
|
||||||
// updated later, we could call the "connecting" picker when the state is
|
// updated later, we could call the "connecting" picker when the state is
|
||||||
// updated, and then call the "ready" picker after the picker gets updated.
|
// updated, and then call the "ready" picker after the picker gets updated.
|
||||||
ccb.cc.blockingpicker.updatePickerV2(s.Picker)
|
ccb.cc.blockingpicker.updatePicker(s.Picker)
|
||||||
ccb.cc.csMgr.updateState(s.ConnectivityState)
|
ccb.cc.csMgr.updateState(s.ConnectivityState)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
334
vendor/google.golang.org/grpc/balancer_v1_wrapper.go
generated
vendored
334
vendor/google.golang.org/grpc/balancer_v1_wrapper.go
generated
vendored
|
@ -1,334 +0,0 @@
|
||||||
/*
|
|
||||||
*
|
|
||||||
* Copyright 2017 gRPC authors.
|
|
||||||
*
|
|
||||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
* you may not use this file except in compliance with the License.
|
|
||||||
* You may obtain a copy of the License at
|
|
||||||
*
|
|
||||||
* http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
*
|
|
||||||
* Unless required by applicable law or agreed to in writing, software
|
|
||||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
* See the License for the specific language governing permissions and
|
|
||||||
* limitations under the License.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
package grpc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"google.golang.org/grpc/balancer"
|
|
||||||
"google.golang.org/grpc/connectivity"
|
|
||||||
"google.golang.org/grpc/grpclog"
|
|
||||||
"google.golang.org/grpc/resolver"
|
|
||||||
)
|
|
||||||
|
|
||||||
type balancerWrapperBuilder struct {
|
|
||||||
b Balancer // The v1 balancer.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bwb *balancerWrapperBuilder) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Balancer {
|
|
||||||
bwb.b.Start(opts.Target.Endpoint, BalancerConfig{
|
|
||||||
DialCreds: opts.DialCreds,
|
|
||||||
Dialer: opts.Dialer,
|
|
||||||
})
|
|
||||||
_, pickfirst := bwb.b.(*pickFirst)
|
|
||||||
bw := &balancerWrapper{
|
|
||||||
balancer: bwb.b,
|
|
||||||
pickfirst: pickfirst,
|
|
||||||
cc: cc,
|
|
||||||
targetAddr: opts.Target.Endpoint,
|
|
||||||
startCh: make(chan struct{}),
|
|
||||||
conns: make(map[resolver.Address]balancer.SubConn),
|
|
||||||
connSt: make(map[balancer.SubConn]*scState),
|
|
||||||
csEvltr: &balancer.ConnectivityStateEvaluator{},
|
|
||||||
state: connectivity.Idle,
|
|
||||||
}
|
|
||||||
cc.UpdateState(balancer.State{ConnectivityState: connectivity.Idle, Picker: bw})
|
|
||||||
go bw.lbWatcher()
|
|
||||||
return bw
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bwb *balancerWrapperBuilder) Name() string {
|
|
||||||
return "wrapper"
|
|
||||||
}
|
|
||||||
|
|
||||||
type scState struct {
|
|
||||||
addr Address // The v1 address type.
|
|
||||||
s connectivity.State
|
|
||||||
down func(error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type balancerWrapper struct {
|
|
||||||
balancer Balancer // The v1 balancer.
|
|
||||||
pickfirst bool
|
|
||||||
|
|
||||||
cc balancer.ClientConn
|
|
||||||
targetAddr string // Target without the scheme.
|
|
||||||
|
|
||||||
mu sync.Mutex
|
|
||||||
conns map[resolver.Address]balancer.SubConn
|
|
||||||
connSt map[balancer.SubConn]*scState
|
|
||||||
// This channel is closed when handling the first resolver result.
|
|
||||||
// lbWatcher blocks until this is closed, to avoid race between
|
|
||||||
// - NewSubConn is created, cc wants to notify balancer of state changes;
|
|
||||||
// - Build hasn't return, cc doesn't have access to balancer.
|
|
||||||
startCh chan struct{}
|
|
||||||
|
|
||||||
// To aggregate the connectivity state.
|
|
||||||
csEvltr *balancer.ConnectivityStateEvaluator
|
|
||||||
state connectivity.State
|
|
||||||
}
|
|
||||||
|
|
||||||
// lbWatcher watches the Notify channel of the balancer and manages
|
|
||||||
// connections accordingly.
|
|
||||||
func (bw *balancerWrapper) lbWatcher() {
|
|
||||||
<-bw.startCh
|
|
||||||
notifyCh := bw.balancer.Notify()
|
|
||||||
if notifyCh == nil {
|
|
||||||
// There's no resolver in the balancer. Connect directly.
|
|
||||||
a := resolver.Address{
|
|
||||||
Addr: bw.targetAddr,
|
|
||||||
Type: resolver.Backend,
|
|
||||||
}
|
|
||||||
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
|
|
||||||
} else {
|
|
||||||
bw.mu.Lock()
|
|
||||||
bw.conns[a] = sc
|
|
||||||
bw.connSt[sc] = &scState{
|
|
||||||
addr: Address{Addr: bw.targetAddr},
|
|
||||||
s: connectivity.Idle,
|
|
||||||
}
|
|
||||||
bw.mu.Unlock()
|
|
||||||
sc.Connect()
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for addrs := range notifyCh {
|
|
||||||
grpclog.Infof("balancerWrapper: got update addr from Notify: %v", addrs)
|
|
||||||
if bw.pickfirst {
|
|
||||||
var (
|
|
||||||
oldA resolver.Address
|
|
||||||
oldSC balancer.SubConn
|
|
||||||
)
|
|
||||||
bw.mu.Lock()
|
|
||||||
for oldA, oldSC = range bw.conns {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
bw.mu.Unlock()
|
|
||||||
if len(addrs) <= 0 {
|
|
||||||
if oldSC != nil {
|
|
||||||
// Teardown old sc.
|
|
||||||
bw.mu.Lock()
|
|
||||||
delete(bw.conns, oldA)
|
|
||||||
delete(bw.connSt, oldSC)
|
|
||||||
bw.mu.Unlock()
|
|
||||||
bw.cc.RemoveSubConn(oldSC)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var newAddrs []resolver.Address
|
|
||||||
for _, a := range addrs {
|
|
||||||
newAddr := resolver.Address{
|
|
||||||
Addr: a.Addr,
|
|
||||||
Type: resolver.Backend, // All addresses from balancer are all backends.
|
|
||||||
ServerName: "",
|
|
||||||
Metadata: a.Metadata,
|
|
||||||
}
|
|
||||||
newAddrs = append(newAddrs, newAddr)
|
|
||||||
}
|
|
||||||
if oldSC == nil {
|
|
||||||
// Create new sc.
|
|
||||||
sc, err := bw.cc.NewSubConn(newAddrs, balancer.NewSubConnOptions{})
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Warningf("Error creating connection to %v. Err: %v", newAddrs, err)
|
|
||||||
} else {
|
|
||||||
bw.mu.Lock()
|
|
||||||
// For pickfirst, there should be only one SubConn, so the
|
|
||||||
// address doesn't matter. All states updating (up and down)
|
|
||||||
// and picking should all happen on that only SubConn.
|
|
||||||
bw.conns[resolver.Address{}] = sc
|
|
||||||
bw.connSt[sc] = &scState{
|
|
||||||
addr: addrs[0], // Use the first address.
|
|
||||||
s: connectivity.Idle,
|
|
||||||
}
|
|
||||||
bw.mu.Unlock()
|
|
||||||
sc.Connect()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
bw.mu.Lock()
|
|
||||||
bw.connSt[oldSC].addr = addrs[0]
|
|
||||||
bw.mu.Unlock()
|
|
||||||
oldSC.UpdateAddresses(newAddrs)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
var (
|
|
||||||
add []resolver.Address // Addresses need to setup connections.
|
|
||||||
del []balancer.SubConn // Connections need to tear down.
|
|
||||||
)
|
|
||||||
resAddrs := make(map[resolver.Address]Address)
|
|
||||||
for _, a := range addrs {
|
|
||||||
resAddrs[resolver.Address{
|
|
||||||
Addr: a.Addr,
|
|
||||||
Type: resolver.Backend, // All addresses from balancer are all backends.
|
|
||||||
ServerName: "",
|
|
||||||
Metadata: a.Metadata,
|
|
||||||
}] = a
|
|
||||||
}
|
|
||||||
bw.mu.Lock()
|
|
||||||
for a := range resAddrs {
|
|
||||||
if _, ok := bw.conns[a]; !ok {
|
|
||||||
add = append(add, a)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for a, c := range bw.conns {
|
|
||||||
if _, ok := resAddrs[a]; !ok {
|
|
||||||
del = append(del, c)
|
|
||||||
delete(bw.conns, a)
|
|
||||||
// Keep the state of this sc in bw.connSt until its state becomes Shutdown.
|
|
||||||
}
|
|
||||||
}
|
|
||||||
bw.mu.Unlock()
|
|
||||||
for _, a := range add {
|
|
||||||
sc, err := bw.cc.NewSubConn([]resolver.Address{a}, balancer.NewSubConnOptions{})
|
|
||||||
if err != nil {
|
|
||||||
grpclog.Warningf("Error creating connection to %v. Err: %v", a, err)
|
|
||||||
} else {
|
|
||||||
bw.mu.Lock()
|
|
||||||
bw.conns[a] = sc
|
|
||||||
bw.connSt[sc] = &scState{
|
|
||||||
addr: resAddrs[a],
|
|
||||||
s: connectivity.Idle,
|
|
||||||
}
|
|
||||||
bw.mu.Unlock()
|
|
||||||
sc.Connect()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for _, c := range del {
|
|
||||||
bw.cc.RemoveSubConn(c)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bw *balancerWrapper) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
|
|
||||||
bw.mu.Lock()
|
|
||||||
defer bw.mu.Unlock()
|
|
||||||
scSt, ok := bw.connSt[sc]
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if s == connectivity.Idle {
|
|
||||||
sc.Connect()
|
|
||||||
}
|
|
||||||
oldS := scSt.s
|
|
||||||
scSt.s = s
|
|
||||||
if oldS != connectivity.Ready && s == connectivity.Ready {
|
|
||||||
scSt.down = bw.balancer.Up(scSt.addr)
|
|
||||||
} else if oldS == connectivity.Ready && s != connectivity.Ready {
|
|
||||||
if scSt.down != nil {
|
|
||||||
scSt.down(errConnClosing)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
sa := bw.csEvltr.RecordTransition(oldS, s)
|
|
||||||
if bw.state != sa {
|
|
||||||
bw.state = sa
|
|
||||||
}
|
|
||||||
bw.cc.UpdateState(balancer.State{ConnectivityState: bw.state, Picker: bw})
|
|
||||||
if s == connectivity.Shutdown {
|
|
||||||
// Remove state for this sc.
|
|
||||||
delete(bw.connSt, sc)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bw *balancerWrapper) HandleResolvedAddrs([]resolver.Address, error) {
|
|
||||||
bw.mu.Lock()
|
|
||||||
defer bw.mu.Unlock()
|
|
||||||
select {
|
|
||||||
case <-bw.startCh:
|
|
||||||
default:
|
|
||||||
close(bw.startCh)
|
|
||||||
}
|
|
||||||
// There should be a resolver inside the balancer.
|
|
||||||
// All updates here, if any, are ignored.
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bw *balancerWrapper) Close() {
|
|
||||||
bw.mu.Lock()
|
|
||||||
defer bw.mu.Unlock()
|
|
||||||
select {
|
|
||||||
case <-bw.startCh:
|
|
||||||
default:
|
|
||||||
close(bw.startCh)
|
|
||||||
}
|
|
||||||
bw.balancer.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// The picker is the balancerWrapper itself.
|
|
||||||
// It either blocks or returns error, consistent with v1 balancer Get().
|
|
||||||
func (bw *balancerWrapper) Pick(info balancer.PickInfo) (result balancer.PickResult, err error) {
|
|
||||||
failfast := true // Default failfast is true.
|
|
||||||
if ss, ok := rpcInfoFromContext(info.Ctx); ok {
|
|
||||||
failfast = ss.failfast
|
|
||||||
}
|
|
||||||
a, p, err := bw.balancer.Get(info.Ctx, BalancerGetOptions{BlockingWait: !failfast})
|
|
||||||
if err != nil {
|
|
||||||
return balancer.PickResult{}, toRPCErr(err)
|
|
||||||
}
|
|
||||||
if p != nil {
|
|
||||||
result.Done = func(balancer.DoneInfo) { p() }
|
|
||||||
defer func() {
|
|
||||||
if err != nil {
|
|
||||||
p()
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
}
|
|
||||||
|
|
||||||
bw.mu.Lock()
|
|
||||||
defer bw.mu.Unlock()
|
|
||||||
if bw.pickfirst {
|
|
||||||
// Get the first sc in conns.
|
|
||||||
for _, result.SubConn = range bw.conns {
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
|
||||||
}
|
|
||||||
var ok1 bool
|
|
||||||
result.SubConn, ok1 = bw.conns[resolver.Address{
|
|
||||||
Addr: a.Addr,
|
|
||||||
Type: resolver.Backend,
|
|
||||||
ServerName: "",
|
|
||||||
Metadata: a.Metadata,
|
|
||||||
}]
|
|
||||||
s, ok2 := bw.connSt[result.SubConn]
|
|
||||||
if !ok1 || !ok2 {
|
|
||||||
// This can only happen due to a race where Get() returned an address
|
|
||||||
// that was subsequently removed by Notify. In this case we should
|
|
||||||
// retry always.
|
|
||||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
|
||||||
}
|
|
||||||
switch s.s {
|
|
||||||
case connectivity.Ready, connectivity.Idle:
|
|
||||||
return result, nil
|
|
||||||
case connectivity.Shutdown, connectivity.TransientFailure:
|
|
||||||
// If the returned sc has been shut down or is in transient failure,
|
|
||||||
// return error, and this RPC will fail or wait for another picker (if
|
|
||||||
// non-failfast).
|
|
||||||
return balancer.PickResult{}, balancer.ErrTransientFailure
|
|
||||||
default:
|
|
||||||
// For other states (connecting or unknown), the v1 balancer would
|
|
||||||
// traditionally wait until ready and then issue the RPC. Returning
|
|
||||||
// ErrNoSubConnAvailable will be a slight improvement in that it will
|
|
||||||
// allow the balancer to choose another address in case others are
|
|
||||||
// connected.
|
|
||||||
return balancer.PickResult{}, balancer.ErrNoSubConnAvailable
|
|
||||||
}
|
|
||||||
}
|
|
320
vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
generated
vendored
320
vendor/google.golang.org/grpc/binarylog/grpc_binarylog_v1/binarylog.pb.go
generated
vendored
|
@ -1,13 +1,15 @@
|
||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// source: grpc/binarylog/grpc_binarylog_v1/binarylog.proto
|
// source: grpc/binlog/v1/binarylog.proto
|
||||||
|
|
||||||
package grpc_binarylog_v1 // import "google.golang.org/grpc/binarylog/grpc_binarylog_v1"
|
package grpc_binarylog_v1
|
||||||
|
|
||||||
import proto "github.com/golang/protobuf/proto"
|
import (
|
||||||
import fmt "fmt"
|
fmt "fmt"
|
||||||
import math "math"
|
proto "github.com/golang/protobuf/proto"
|
||||||
import duration "github.com/golang/protobuf/ptypes/duration"
|
duration "github.com/golang/protobuf/ptypes/duration"
|
||||||
import timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
timestamp "github.com/golang/protobuf/ptypes/timestamp"
|
||||||
|
math "math"
|
||||||
|
)
|
||||||
|
|
||||||
// Reference imports to suppress errors if they are not otherwise used.
|
// Reference imports to suppress errors if they are not otherwise used.
|
||||||
var _ = proto.Marshal
|
var _ = proto.Marshal
|
||||||
|
@ -18,7 +20,7 @@ var _ = math.Inf
|
||||||
// is compatible with the proto package it is being compiled against.
|
// is compatible with the proto package it is being compiled against.
|
||||||
// A compilation error at this line likely means your copy of the
|
// A compilation error at this line likely means your copy of the
|
||||||
// proto package needs to be updated.
|
// proto package needs to be updated.
|
||||||
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
|
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
|
||||||
|
|
||||||
// Enumerates the type of event
|
// Enumerates the type of event
|
||||||
// Note the terminology is different from the RPC semantics
|
// Note the terminology is different from the RPC semantics
|
||||||
|
@ -64,6 +66,7 @@ var GrpcLogEntry_EventType_name = map[int32]string{
|
||||||
6: "EVENT_TYPE_SERVER_TRAILER",
|
6: "EVENT_TYPE_SERVER_TRAILER",
|
||||||
7: "EVENT_TYPE_CANCEL",
|
7: "EVENT_TYPE_CANCEL",
|
||||||
}
|
}
|
||||||
|
|
||||||
var GrpcLogEntry_EventType_value = map[string]int32{
|
var GrpcLogEntry_EventType_value = map[string]int32{
|
||||||
"EVENT_TYPE_UNKNOWN": 0,
|
"EVENT_TYPE_UNKNOWN": 0,
|
||||||
"EVENT_TYPE_CLIENT_HEADER": 1,
|
"EVENT_TYPE_CLIENT_HEADER": 1,
|
||||||
|
@ -78,8 +81,9 @@ var GrpcLogEntry_EventType_value = map[string]int32{
|
||||||
func (x GrpcLogEntry_EventType) String() string {
|
func (x GrpcLogEntry_EventType) String() string {
|
||||||
return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
|
return proto.EnumName(GrpcLogEntry_EventType_name, int32(x))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
|
func (GrpcLogEntry_EventType) EnumDescriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 0}
|
return fileDescriptor_b7972e58de45083a, []int{0, 0}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Enumerates the entity that generates the log entry
|
// Enumerates the entity that generates the log entry
|
||||||
|
@ -96,6 +100,7 @@ var GrpcLogEntry_Logger_name = map[int32]string{
|
||||||
1: "LOGGER_CLIENT",
|
1: "LOGGER_CLIENT",
|
||||||
2: "LOGGER_SERVER",
|
2: "LOGGER_SERVER",
|
||||||
}
|
}
|
||||||
|
|
||||||
var GrpcLogEntry_Logger_value = map[string]int32{
|
var GrpcLogEntry_Logger_value = map[string]int32{
|
||||||
"LOGGER_UNKNOWN": 0,
|
"LOGGER_UNKNOWN": 0,
|
||||||
"LOGGER_CLIENT": 1,
|
"LOGGER_CLIENT": 1,
|
||||||
|
@ -105,8 +110,9 @@ var GrpcLogEntry_Logger_value = map[string]int32{
|
||||||
func (x GrpcLogEntry_Logger) String() string {
|
func (x GrpcLogEntry_Logger) String() string {
|
||||||
return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
|
return proto.EnumName(GrpcLogEntry_Logger_name, int32(x))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
|
func (GrpcLogEntry_Logger) EnumDescriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0, 1}
|
return fileDescriptor_b7972e58de45083a, []int{0, 1}
|
||||||
}
|
}
|
||||||
|
|
||||||
type Address_Type int32
|
type Address_Type int32
|
||||||
|
@ -128,6 +134,7 @@ var Address_Type_name = map[int32]string{
|
||||||
2: "TYPE_IPV6",
|
2: "TYPE_IPV6",
|
||||||
3: "TYPE_UNIX",
|
3: "TYPE_UNIX",
|
||||||
}
|
}
|
||||||
|
|
||||||
var Address_Type_value = map[string]int32{
|
var Address_Type_value = map[string]int32{
|
||||||
"TYPE_UNKNOWN": 0,
|
"TYPE_UNKNOWN": 0,
|
||||||
"TYPE_IPV4": 1,
|
"TYPE_IPV4": 1,
|
||||||
|
@ -138,8 +145,9 @@ var Address_Type_value = map[string]int32{
|
||||||
func (x Address_Type) String() string {
|
func (x Address_Type) String() string {
|
||||||
return proto.EnumName(Address_Type_name, int32(x))
|
return proto.EnumName(Address_Type_name, int32(x))
|
||||||
}
|
}
|
||||||
|
|
||||||
func (Address_Type) EnumDescriptor() ([]byte, []int) {
|
func (Address_Type) EnumDescriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7, 0}
|
return fileDescriptor_b7972e58de45083a, []int{7, 0}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Log entry we store in binary logs
|
// Log entry we store in binary logs
|
||||||
|
@ -185,16 +193,17 @@ func (m *GrpcLogEntry) Reset() { *m = GrpcLogEntry{} }
|
||||||
func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
|
func (m *GrpcLogEntry) String() string { return proto.CompactTextString(m) }
|
||||||
func (*GrpcLogEntry) ProtoMessage() {}
|
func (*GrpcLogEntry) ProtoMessage() {}
|
||||||
func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
|
func (*GrpcLogEntry) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{0}
|
return fileDescriptor_b7972e58de45083a, []int{0}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
|
func (m *GrpcLogEntry) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
|
return xxx_messageInfo_GrpcLogEntry.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *GrpcLogEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
|
return xxx_messageInfo_GrpcLogEntry.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *GrpcLogEntry) XXX_Merge(src proto.Message) {
|
func (m *GrpcLogEntry) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_GrpcLogEntry.Merge(dst, src)
|
xxx_messageInfo_GrpcLogEntry.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *GrpcLogEntry) XXX_Size() int {
|
func (m *GrpcLogEntry) XXX_Size() int {
|
||||||
return xxx_messageInfo_GrpcLogEntry.Size(m)
|
return xxx_messageInfo_GrpcLogEntry.Size(m)
|
||||||
|
@ -317,9 +326,9 @@ func (m *GrpcLogEntry) GetPeer() *Address {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// XXX_OneofFuncs is for the internal use of the proto package.
|
// XXX_OneofWrappers is for the internal use of the proto package.
|
||||||
func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
|
func (*GrpcLogEntry) XXX_OneofWrappers() []interface{} {
|
||||||
return _GrpcLogEntry_OneofMarshaler, _GrpcLogEntry_OneofUnmarshaler, _GrpcLogEntry_OneofSizer, []interface{}{
|
return []interface{}{
|
||||||
(*GrpcLogEntry_ClientHeader)(nil),
|
(*GrpcLogEntry_ClientHeader)(nil),
|
||||||
(*GrpcLogEntry_ServerHeader)(nil),
|
(*GrpcLogEntry_ServerHeader)(nil),
|
||||||
(*GrpcLogEntry_Message)(nil),
|
(*GrpcLogEntry_Message)(nil),
|
||||||
|
@ -327,108 +336,6 @@ func (*GrpcLogEntry) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func _GrpcLogEntry_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
|
|
||||||
m := msg.(*GrpcLogEntry)
|
|
||||||
// payload
|
|
||||||
switch x := m.Payload.(type) {
|
|
||||||
case *GrpcLogEntry_ClientHeader:
|
|
||||||
b.EncodeVarint(6<<3 | proto.WireBytes)
|
|
||||||
if err := b.EncodeMessage(x.ClientHeader); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case *GrpcLogEntry_ServerHeader:
|
|
||||||
b.EncodeVarint(7<<3 | proto.WireBytes)
|
|
||||||
if err := b.EncodeMessage(x.ServerHeader); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case *GrpcLogEntry_Message:
|
|
||||||
b.EncodeVarint(8<<3 | proto.WireBytes)
|
|
||||||
if err := b.EncodeMessage(x.Message); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case *GrpcLogEntry_Trailer:
|
|
||||||
b.EncodeVarint(9<<3 | proto.WireBytes)
|
|
||||||
if err := b.EncodeMessage(x.Trailer); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
case nil:
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("GrpcLogEntry.Payload has unexpected type %T", x)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func _GrpcLogEntry_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
|
|
||||||
m := msg.(*GrpcLogEntry)
|
|
||||||
switch tag {
|
|
||||||
case 6: // payload.client_header
|
|
||||||
if wire != proto.WireBytes {
|
|
||||||
return true, proto.ErrInternalBadWireType
|
|
||||||
}
|
|
||||||
msg := new(ClientHeader)
|
|
||||||
err := b.DecodeMessage(msg)
|
|
||||||
m.Payload = &GrpcLogEntry_ClientHeader{msg}
|
|
||||||
return true, err
|
|
||||||
case 7: // payload.server_header
|
|
||||||
if wire != proto.WireBytes {
|
|
||||||
return true, proto.ErrInternalBadWireType
|
|
||||||
}
|
|
||||||
msg := new(ServerHeader)
|
|
||||||
err := b.DecodeMessage(msg)
|
|
||||||
m.Payload = &GrpcLogEntry_ServerHeader{msg}
|
|
||||||
return true, err
|
|
||||||
case 8: // payload.message
|
|
||||||
if wire != proto.WireBytes {
|
|
||||||
return true, proto.ErrInternalBadWireType
|
|
||||||
}
|
|
||||||
msg := new(Message)
|
|
||||||
err := b.DecodeMessage(msg)
|
|
||||||
m.Payload = &GrpcLogEntry_Message{msg}
|
|
||||||
return true, err
|
|
||||||
case 9: // payload.trailer
|
|
||||||
if wire != proto.WireBytes {
|
|
||||||
return true, proto.ErrInternalBadWireType
|
|
||||||
}
|
|
||||||
msg := new(Trailer)
|
|
||||||
err := b.DecodeMessage(msg)
|
|
||||||
m.Payload = &GrpcLogEntry_Trailer{msg}
|
|
||||||
return true, err
|
|
||||||
default:
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func _GrpcLogEntry_OneofSizer(msg proto.Message) (n int) {
|
|
||||||
m := msg.(*GrpcLogEntry)
|
|
||||||
// payload
|
|
||||||
switch x := m.Payload.(type) {
|
|
||||||
case *GrpcLogEntry_ClientHeader:
|
|
||||||
s := proto.Size(x.ClientHeader)
|
|
||||||
n += 1 // tag and wire
|
|
||||||
n += proto.SizeVarint(uint64(s))
|
|
||||||
n += s
|
|
||||||
case *GrpcLogEntry_ServerHeader:
|
|
||||||
s := proto.Size(x.ServerHeader)
|
|
||||||
n += 1 // tag and wire
|
|
||||||
n += proto.SizeVarint(uint64(s))
|
|
||||||
n += s
|
|
||||||
case *GrpcLogEntry_Message:
|
|
||||||
s := proto.Size(x.Message)
|
|
||||||
n += 1 // tag and wire
|
|
||||||
n += proto.SizeVarint(uint64(s))
|
|
||||||
n += s
|
|
||||||
case *GrpcLogEntry_Trailer:
|
|
||||||
s := proto.Size(x.Trailer)
|
|
||||||
n += 1 // tag and wire
|
|
||||||
n += proto.SizeVarint(uint64(s))
|
|
||||||
n += s
|
|
||||||
case nil:
|
|
||||||
default:
|
|
||||||
panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
|
|
||||||
}
|
|
||||||
return n
|
|
||||||
}
|
|
||||||
|
|
||||||
type ClientHeader struct {
|
type ClientHeader struct {
|
||||||
// This contains only the metadata from the application.
|
// This contains only the metadata from the application.
|
||||||
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
Metadata *Metadata `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
|
||||||
|
@ -453,16 +360,17 @@ func (m *ClientHeader) Reset() { *m = ClientHeader{} }
|
||||||
func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
|
func (m *ClientHeader) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ClientHeader) ProtoMessage() {}
|
func (*ClientHeader) ProtoMessage() {}
|
||||||
func (*ClientHeader) Descriptor() ([]byte, []int) {
|
func (*ClientHeader) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{1}
|
return fileDescriptor_b7972e58de45083a, []int{1}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
|
func (m *ClientHeader) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
|
return xxx_messageInfo_ClientHeader.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *ClientHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
|
return xxx_messageInfo_ClientHeader.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *ClientHeader) XXX_Merge(src proto.Message) {
|
func (m *ClientHeader) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_ClientHeader.Merge(dst, src)
|
xxx_messageInfo_ClientHeader.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *ClientHeader) XXX_Size() int {
|
func (m *ClientHeader) XXX_Size() int {
|
||||||
return xxx_messageInfo_ClientHeader.Size(m)
|
return xxx_messageInfo_ClientHeader.Size(m)
|
||||||
|
@ -513,16 +421,17 @@ func (m *ServerHeader) Reset() { *m = ServerHeader{} }
|
||||||
func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
|
func (m *ServerHeader) String() string { return proto.CompactTextString(m) }
|
||||||
func (*ServerHeader) ProtoMessage() {}
|
func (*ServerHeader) ProtoMessage() {}
|
||||||
func (*ServerHeader) Descriptor() ([]byte, []int) {
|
func (*ServerHeader) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{2}
|
return fileDescriptor_b7972e58de45083a, []int{2}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
|
func (m *ServerHeader) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
|
return xxx_messageInfo_ServerHeader.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *ServerHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
|
return xxx_messageInfo_ServerHeader.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *ServerHeader) XXX_Merge(src proto.Message) {
|
func (m *ServerHeader) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_ServerHeader.Merge(dst, src)
|
xxx_messageInfo_ServerHeader.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *ServerHeader) XXX_Size() int {
|
func (m *ServerHeader) XXX_Size() int {
|
||||||
return xxx_messageInfo_ServerHeader.Size(m)
|
return xxx_messageInfo_ServerHeader.Size(m)
|
||||||
|
@ -560,16 +469,17 @@ func (m *Trailer) Reset() { *m = Trailer{} }
|
||||||
func (m *Trailer) String() string { return proto.CompactTextString(m) }
|
func (m *Trailer) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Trailer) ProtoMessage() {}
|
func (*Trailer) ProtoMessage() {}
|
||||||
func (*Trailer) Descriptor() ([]byte, []int) {
|
func (*Trailer) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{3}
|
return fileDescriptor_b7972e58de45083a, []int{3}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Trailer) XXX_Unmarshal(b []byte) error {
|
func (m *Trailer) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_Trailer.Unmarshal(m, b)
|
return xxx_messageInfo_Trailer.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *Trailer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
|
return xxx_messageInfo_Trailer.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *Trailer) XXX_Merge(src proto.Message) {
|
func (m *Trailer) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_Trailer.Merge(dst, src)
|
xxx_messageInfo_Trailer.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *Trailer) XXX_Size() int {
|
func (m *Trailer) XXX_Size() int {
|
||||||
return xxx_messageInfo_Trailer.Size(m)
|
return xxx_messageInfo_Trailer.Size(m)
|
||||||
|
@ -624,16 +534,17 @@ func (m *Message) Reset() { *m = Message{} }
|
||||||
func (m *Message) String() string { return proto.CompactTextString(m) }
|
func (m *Message) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Message) ProtoMessage() {}
|
func (*Message) ProtoMessage() {}
|
||||||
func (*Message) Descriptor() ([]byte, []int) {
|
func (*Message) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{4}
|
return fileDescriptor_b7972e58de45083a, []int{4}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Message) XXX_Unmarshal(b []byte) error {
|
func (m *Message) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_Message.Unmarshal(m, b)
|
return xxx_messageInfo_Message.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
|
return xxx_messageInfo_Message.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *Message) XXX_Merge(src proto.Message) {
|
func (m *Message) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_Message.Merge(dst, src)
|
xxx_messageInfo_Message.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *Message) XXX_Size() int {
|
func (m *Message) XXX_Size() int {
|
||||||
return xxx_messageInfo_Message.Size(m)
|
return xxx_messageInfo_Message.Size(m)
|
||||||
|
@ -690,16 +601,17 @@ func (m *Metadata) Reset() { *m = Metadata{} }
|
||||||
func (m *Metadata) String() string { return proto.CompactTextString(m) }
|
func (m *Metadata) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Metadata) ProtoMessage() {}
|
func (*Metadata) ProtoMessage() {}
|
||||||
func (*Metadata) Descriptor() ([]byte, []int) {
|
func (*Metadata) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{5}
|
return fileDescriptor_b7972e58de45083a, []int{5}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Metadata) XXX_Unmarshal(b []byte) error {
|
func (m *Metadata) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_Metadata.Unmarshal(m, b)
|
return xxx_messageInfo_Metadata.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
|
return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *Metadata) XXX_Merge(src proto.Message) {
|
func (m *Metadata) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_Metadata.Merge(dst, src)
|
xxx_messageInfo_Metadata.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *Metadata) XXX_Size() int {
|
func (m *Metadata) XXX_Size() int {
|
||||||
return xxx_messageInfo_Metadata.Size(m)
|
return xxx_messageInfo_Metadata.Size(m)
|
||||||
|
@ -730,16 +642,17 @@ func (m *MetadataEntry) Reset() { *m = MetadataEntry{} }
|
||||||
func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
|
func (m *MetadataEntry) String() string { return proto.CompactTextString(m) }
|
||||||
func (*MetadataEntry) ProtoMessage() {}
|
func (*MetadataEntry) ProtoMessage() {}
|
||||||
func (*MetadataEntry) Descriptor() ([]byte, []int) {
|
func (*MetadataEntry) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{6}
|
return fileDescriptor_b7972e58de45083a, []int{6}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
|
func (m *MetadataEntry) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
|
return xxx_messageInfo_MetadataEntry.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *MetadataEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
|
return xxx_messageInfo_MetadataEntry.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *MetadataEntry) XXX_Merge(src proto.Message) {
|
func (m *MetadataEntry) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_MetadataEntry.Merge(dst, src)
|
xxx_messageInfo_MetadataEntry.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *MetadataEntry) XXX_Size() int {
|
func (m *MetadataEntry) XXX_Size() int {
|
||||||
return xxx_messageInfo_MetadataEntry.Size(m)
|
return xxx_messageInfo_MetadataEntry.Size(m)
|
||||||
|
@ -779,16 +692,17 @@ func (m *Address) Reset() { *m = Address{} }
|
||||||
func (m *Address) String() string { return proto.CompactTextString(m) }
|
func (m *Address) String() string { return proto.CompactTextString(m) }
|
||||||
func (*Address) ProtoMessage() {}
|
func (*Address) ProtoMessage() {}
|
||||||
func (*Address) Descriptor() ([]byte, []int) {
|
func (*Address) Descriptor() ([]byte, []int) {
|
||||||
return fileDescriptor_binarylog_264c8c9c551ce911, []int{7}
|
return fileDescriptor_b7972e58de45083a, []int{7}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Address) XXX_Unmarshal(b []byte) error {
|
func (m *Address) XXX_Unmarshal(b []byte) error {
|
||||||
return xxx_messageInfo_Address.Unmarshal(m, b)
|
return xxx_messageInfo_Address.Unmarshal(m, b)
|
||||||
}
|
}
|
||||||
func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
func (m *Address) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
|
||||||
return xxx_messageInfo_Address.Marshal(b, m, deterministic)
|
return xxx_messageInfo_Address.Marshal(b, m, deterministic)
|
||||||
}
|
}
|
||||||
func (dst *Address) XXX_Merge(src proto.Message) {
|
func (m *Address) XXX_Merge(src proto.Message) {
|
||||||
xxx_messageInfo_Address.Merge(dst, src)
|
xxx_messageInfo_Address.Merge(m, src)
|
||||||
}
|
}
|
||||||
func (m *Address) XXX_Size() int {
|
func (m *Address) XXX_Size() int {
|
||||||
return xxx_messageInfo_Address.Size(m)
|
return xxx_messageInfo_Address.Size(m)
|
||||||
|
@ -821,6 +735,9 @@ func (m *Address) GetIpPort() uint32 {
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
|
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
|
||||||
|
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
|
||||||
|
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
|
||||||
proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
|
proto.RegisterType((*GrpcLogEntry)(nil), "grpc.binarylog.v1.GrpcLogEntry")
|
||||||
proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
|
proto.RegisterType((*ClientHeader)(nil), "grpc.binarylog.v1.ClientHeader")
|
||||||
proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
|
proto.RegisterType((*ServerHeader)(nil), "grpc.binarylog.v1.ServerHeader")
|
||||||
|
@ -829,72 +746,67 @@ func init() {
|
||||||
proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
|
proto.RegisterType((*Metadata)(nil), "grpc.binarylog.v1.Metadata")
|
||||||
proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
|
proto.RegisterType((*MetadataEntry)(nil), "grpc.binarylog.v1.MetadataEntry")
|
||||||
proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
|
proto.RegisterType((*Address)(nil), "grpc.binarylog.v1.Address")
|
||||||
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_EventType", GrpcLogEntry_EventType_name, GrpcLogEntry_EventType_value)
|
|
||||||
proto.RegisterEnum("grpc.binarylog.v1.GrpcLogEntry_Logger", GrpcLogEntry_Logger_name, GrpcLogEntry_Logger_value)
|
|
||||||
proto.RegisterEnum("grpc.binarylog.v1.Address_Type", Address_Type_name, Address_Type_value)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func init() {
|
func init() { proto.RegisterFile("grpc/binlog/v1/binarylog.proto", fileDescriptor_b7972e58de45083a) }
|
||||||
proto.RegisterFile("grpc/binarylog/grpc_binarylog_v1/binarylog.proto", fileDescriptor_binarylog_264c8c9c551ce911)
|
|
||||||
}
|
|
||||||
|
|
||||||
var fileDescriptor_binarylog_264c8c9c551ce911 = []byte{
|
var fileDescriptor_b7972e58de45083a = []byte{
|
||||||
// 900 bytes of a gzipped FileDescriptorProto
|
// 904 bytes of a gzipped FileDescriptorProto
|
||||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
|
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x51, 0x6f, 0xe3, 0x44,
|
||||||
0x10, 0x3e, 0x37, 0x69, 0xdc, 0x4c, 0x92, 0xca, 0x5d, 0x95, 0x3b, 0x5f, 0x29, 0x34, 0xb2, 0x04,
|
0x10, 0xae, 0xdb, 0x34, 0x6e, 0x26, 0x49, 0xe5, 0xae, 0xca, 0x9d, 0xaf, 0x94, 0x6b, 0x64, 0x09,
|
||||||
0x0a, 0x42, 0x72, 0xb9, 0x94, 0xeb, 0xf1, 0x02, 0x52, 0x92, 0xfa, 0xd2, 0x88, 0x5c, 0x1a, 0x6d,
|
0x14, 0x84, 0xe4, 0xa8, 0x29, 0xd7, 0xe3, 0x05, 0xa4, 0x24, 0xf5, 0xa5, 0x11, 0xb9, 0x34, 0xda,
|
||||||
0x72, 0x3d, 0x40, 0x48, 0xd6, 0x36, 0x5e, 0x1c, 0x0b, 0xc7, 0x6b, 0xd6, 0x9b, 0xa0, 0xfc, 0x2c,
|
0xe4, 0x7a, 0x80, 0x90, 0xac, 0x6d, 0xbc, 0x38, 0x16, 0x8e, 0xd7, 0xac, 0x37, 0x41, 0xf9, 0x59,
|
||||||
0xde, 0x90, 0xee, 0x77, 0xf1, 0x8e, 0xbc, 0x6b, 0x27, 0xa6, 0x69, 0x0f, 0x09, 0xde, 0x3c, 0xdf,
|
0xbc, 0x21, 0xdd, 0xef, 0xe2, 0x1d, 0x79, 0xd7, 0x4e, 0x4d, 0xd3, 0x82, 0xc4, 0xbd, 0xed, 0x7c,
|
||||||
0x7c, 0xf3, 0xcd, 0xee, 0x78, 0x66, 0x16, 0xbe, 0xf2, 0x79, 0x3c, 0x3b, 0xbf, 0x0b, 0x22, 0xc2,
|
0xf3, 0xcd, 0x37, 0xbb, 0xe3, 0x99, 0x31, 0xbc, 0xf4, 0x79, 0x3c, 0x6b, 0xdd, 0x05, 0x51, 0xc8,
|
||||||
0xd7, 0x21, 0xf3, 0xcf, 0x53, 0xd3, 0xdd, 0x98, 0xee, 0xea, 0xc5, 0xd6, 0x67, 0xc7, 0x9c, 0x09,
|
0xfc, 0xd6, 0xea, 0x3c, 0x3d, 0x11, 0xbe, 0x0e, 0x99, 0x6f, 0xc7, 0x9c, 0x09, 0x86, 0x8e, 0x52,
|
||||||
0x86, 0x8e, 0x52, 0x8a, 0xbd, 0x45, 0x57, 0x2f, 0x4e, 0x3e, 0xf5, 0x19, 0xf3, 0x43, 0x7a, 0x2e,
|
0xbf, 0x7d, 0x8f, 0xae, 0xce, 0x4f, 0x5e, 0xfa, 0x8c, 0xf9, 0x21, 0x6d, 0x49, 0xc2, 0xdd, 0xf2,
|
||||||
0x09, 0x77, 0xcb, 0x5f, 0xce, 0xbd, 0x25, 0x27, 0x22, 0x60, 0x91, 0x0a, 0x39, 0x39, 0xbb, 0xef,
|
0x97, 0x96, 0xb7, 0xe4, 0x44, 0x04, 0x2c, 0x52, 0x21, 0x27, 0x67, 0x0f, 0xfd, 0x22, 0x58, 0xd0,
|
||||||
0x17, 0xc1, 0x82, 0x26, 0x82, 0x2c, 0x62, 0x45, 0xb0, 0xde, 0xeb, 0x50, 0xef, 0xf3, 0x78, 0x36,
|
0x44, 0x90, 0x45, 0xac, 0x08, 0xd6, 0x07, 0x1d, 0x6a, 0x7d, 0x1e, 0xcf, 0x86, 0xcc, 0x77, 0x22,
|
||||||
0x64, 0xbe, 0x13, 0x09, 0xbe, 0x46, 0xdf, 0x40, 0x75, 0xc3, 0x31, 0xb5, 0xa6, 0xd6, 0xaa, 0xb5,
|
0xc1, 0xd7, 0xe8, 0x1b, 0xa8, 0x6c, 0x38, 0xa6, 0xd6, 0xd0, 0x9a, 0xd5, 0xf6, 0x89, 0xad, 0x54,
|
||||||
0x4f, 0x6c, 0xa5, 0x62, 0xe7, 0x2a, 0xf6, 0x34, 0x67, 0xe0, 0x2d, 0x19, 0x3d, 0x03, 0x7d, 0x46,
|
0xec, 0x5c, 0xc5, 0x9e, 0xe6, 0x0c, 0x7c, 0x4f, 0x46, 0xcf, 0x41, 0x9f, 0x91, 0x30, 0x74, 0x03,
|
||||||
0xc2, 0xd0, 0x0d, 0x3c, 0x73, 0xaf, 0xa9, 0xb5, 0xca, 0xb8, 0x92, 0x9a, 0x03, 0x0f, 0xbd, 0x84,
|
0xcf, 0xdc, 0x6d, 0x68, 0xcd, 0x12, 0x2e, 0xa7, 0xe6, 0xc0, 0x43, 0xaf, 0xe0, 0x79, 0x42, 0x7f,
|
||||||
0x67, 0x09, 0xfd, 0x6d, 0x49, 0xa3, 0x19, 0x75, 0x03, 0xcf, 0xfd, 0x3d, 0x10, 0xf3, 0x20, 0x72,
|
0x5b, 0xd2, 0x68, 0x46, 0xdd, 0xc0, 0x73, 0x7f, 0x0f, 0xc4, 0x3c, 0x88, 0xdc, 0xd4, 0x69, 0xee,
|
||||||
0x53, 0xa7, 0x59, 0x92, 0xc4, 0xe3, 0xdc, 0x3d, 0xf0, 0xde, 0x49, 0x67, 0x8f, 0x84, 0x21, 0xfa,
|
0x49, 0xe2, 0x71, 0xee, 0x1e, 0x78, 0xef, 0xa5, 0xb3, 0x47, 0xc2, 0x10, 0x7d, 0x0b, 0x25, 0xb1,
|
||||||
0x16, 0xca, 0x62, 0x1d, 0x53, 0xb3, 0xdc, 0xd4, 0x5a, 0x87, 0xed, 0x2f, 0xec, 0x9d, 0xdb, 0xdb,
|
0x8e, 0xa9, 0x59, 0x6a, 0x68, 0xcd, 0xc3, 0xf6, 0x97, 0xf6, 0xd6, 0xeb, 0xed, 0xe2, 0xc5, 0x6d,
|
||||||
0xc5, 0x83, 0xdb, 0xce, 0x8a, 0x46, 0x62, 0xba, 0x8e, 0x29, 0x96, 0x61, 0xe8, 0x3b, 0xa8, 0x84,
|
0x67, 0x45, 0x23, 0x31, 0x5d, 0xc7, 0x14, 0xcb, 0x30, 0xf4, 0x1d, 0x94, 0x43, 0xe6, 0xfb, 0x94,
|
||||||
0xcc, 0xf7, 0x29, 0x37, 0xf7, 0xa5, 0xc0, 0xe7, 0xff, 0x26, 0x30, 0x94, 0x6c, 0x9c, 0x45, 0xa1,
|
0x9b, 0xfb, 0x52, 0xe0, 0x8b, 0xff, 0x12, 0x18, 0x4a, 0x36, 0xce, 0xa2, 0xd0, 0x1b, 0xa8, 0xcf,
|
||||||
0xd7, 0xd0, 0x98, 0x85, 0x01, 0x8d, 0x84, 0x3b, 0xa7, 0xc4, 0xa3, 0xdc, 0xac, 0xc8, 0x62, 0x9c,
|
0xc2, 0x80, 0x46, 0xc2, 0x9d, 0x53, 0xe2, 0x51, 0x6e, 0x96, 0x65, 0x31, 0xce, 0x1e, 0x91, 0xe9,
|
||||||
0x3d, 0x20, 0xd3, 0x93, 0xbc, 0x6b, 0x49, 0xbb, 0x7e, 0x82, 0xeb, 0xb3, 0x82, 0x9d, 0xea, 0x24,
|
0x49, 0xde, 0xb5, 0xa4, 0x5d, 0xef, 0xe0, 0xda, 0xac, 0x60, 0xa7, 0x3a, 0x09, 0xe5, 0x2b, 0xca,
|
||||||
0x94, 0xaf, 0x28, 0xcf, 0x75, 0xf4, 0x47, 0x75, 0x26, 0x92, 0xb7, 0xd5, 0x49, 0x0a, 0x36, 0xba,
|
0x73, 0x1d, 0xfd, 0x49, 0x9d, 0x89, 0xe4, 0xdd, 0xeb, 0x24, 0x05, 0x1b, 0x5d, 0x82, 0xbe, 0xa0,
|
||||||
0x04, 0x7d, 0x41, 0x93, 0x84, 0xf8, 0xd4, 0x3c, 0xc8, 0x7f, 0xcb, 0x8e, 0xc2, 0x1b, 0xc5, 0xb8,
|
0x49, 0x42, 0x7c, 0x6a, 0x1e, 0xe4, 0x9f, 0x65, 0x4b, 0xe1, 0xad, 0x62, 0x5c, 0xef, 0xe0, 0x9c,
|
||||||
0x7e, 0x82, 0x73, 0x72, 0x1a, 0x27, 0x38, 0x09, 0x42, 0xca, 0xcd, 0xea, 0xa3, 0x71, 0x53, 0xc5,
|
0x9c, 0xc6, 0x09, 0x4e, 0x82, 0x90, 0x72, 0xb3, 0xf2, 0x64, 0xdc, 0x54, 0x31, 0xd2, 0xb8, 0x8c,
|
||||||
0x48, 0xe3, 0x32, 0x32, 0xfa, 0x12, 0x8e, 0x62, 0xb2, 0x0e, 0x19, 0xf1, 0x5c, 0xc1, 0x97, 0xd1,
|
0x8c, 0xbe, 0x82, 0xa3, 0x98, 0xac, 0x43, 0x46, 0x3c, 0x57, 0xf0, 0x65, 0x34, 0x23, 0x82, 0x7a,
|
||||||
0x8c, 0x08, 0xea, 0x99, 0xd0, 0xd4, 0x5a, 0x07, 0xd8, 0xc8, 0x1c, 0xd3, 0x1c, 0x47, 0x36, 0x94,
|
0x26, 0x34, 0xb4, 0xe6, 0x01, 0x36, 0x32, 0xc7, 0x34, 0xc7, 0x91, 0x0d, 0xa5, 0x98, 0x52, 0x6e,
|
||||||
0x63, 0x4a, 0xb9, 0x59, 0x7b, 0x34, 0x43, 0xc7, 0xf3, 0x38, 0x4d, 0x12, 0x2c, 0x79, 0xd6, 0x5f,
|
0x56, 0x9f, 0xcc, 0xd0, 0xf1, 0x3c, 0x4e, 0x93, 0x04, 0x4b, 0x9e, 0xf5, 0x97, 0x06, 0x95, 0xcd,
|
||||||
0x1a, 0x54, 0x37, 0x3f, 0x0c, 0x3d, 0x05, 0xe4, 0xdc, 0x3a, 0xa3, 0xa9, 0x3b, 0xfd, 0x71, 0xec,
|
0x07, 0x43, 0xcf, 0x00, 0x39, 0xb7, 0xce, 0x68, 0xea, 0x4e, 0x7f, 0x1c, 0x3b, 0xee, 0xbb, 0xd1,
|
||||||
0xb8, 0x6f, 0x47, 0xdf, 0x8f, 0x6e, 0xde, 0x8d, 0x8c, 0x27, 0xe8, 0x14, 0xcc, 0x02, 0xde, 0x1b,
|
0xf7, 0xa3, 0x9b, 0xf7, 0x23, 0x63, 0x07, 0x9d, 0x82, 0x59, 0xc0, 0x7b, 0xc3, 0x41, 0x7a, 0xbe,
|
||||||
0x0e, 0xd2, 0xef, 0x6b, 0xa7, 0x73, 0xe5, 0x60, 0x43, 0xbb, 0xe7, 0x9d, 0x38, 0xf8, 0xd6, 0xc1,
|
0x76, 0x3a, 0x57, 0x0e, 0x36, 0xb4, 0x07, 0xde, 0x89, 0x83, 0x6f, 0x1d, 0x9c, 0x7b, 0x77, 0xd1,
|
||||||
0xb9, 0x77, 0x0f, 0x7d, 0x02, 0xcf, 0x77, 0x63, 0xdf, 0x38, 0x93, 0x49, 0xa7, 0xef, 0x18, 0xa5,
|
0x67, 0xf0, 0x62, 0x3b, 0xf6, 0xad, 0x33, 0x99, 0x74, 0xfa, 0x8e, 0xb1, 0xf7, 0xc0, 0x9d, 0x05,
|
||||||
0x7b, 0xee, 0x2c, 0x38, 0x77, 0x97, 0x51, 0x13, 0x4e, 0x1f, 0xc8, 0xdc, 0x19, 0xbe, 0x76, 0x7b,
|
0xe7, 0xee, 0x12, 0x6a, 0xc0, 0xe9, 0x23, 0x99, 0x3b, 0xc3, 0x37, 0x6e, 0x6f, 0x78, 0x33, 0x71,
|
||||||
0xc3, 0x9b, 0x89, 0x63, 0xec, 0x3f, 0x2c, 0x30, 0xc5, 0x9d, 0xc1, 0xd0, 0xc1, 0x46, 0x05, 0x7d,
|
0x8c, 0xfd, 0xc7, 0x05, 0xa6, 0xb8, 0x33, 0x18, 0x3a, 0xd8, 0x28, 0xa3, 0x4f, 0xe0, 0xa8, 0x28,
|
||||||
0x04, 0x47, 0x45, 0x81, 0xce, 0xa8, 0xe7, 0x0c, 0x0d, 0xdd, 0xea, 0x42, 0x45, 0xb5, 0x19, 0x42,
|
0xd0, 0x19, 0xf5, 0x9c, 0xa1, 0xa1, 0x5b, 0x5d, 0x28, 0xab, 0x36, 0x43, 0x08, 0x0e, 0x87, 0x37,
|
||||||
0x70, 0x38, 0xbc, 0xe9, 0xf7, 0x1d, 0x5c, 0xb8, 0xef, 0x11, 0x34, 0x32, 0x4c, 0x65, 0x34, 0xb4,
|
0xfd, 0xbe, 0x83, 0x0b, 0xef, 0x3d, 0x82, 0x7a, 0x86, 0xa9, 0x8c, 0x86, 0x56, 0x80, 0x54, 0x0a,
|
||||||
0x02, 0xa4, 0x52, 0x18, 0x7b, 0xdd, 0x2a, 0xe8, 0x59, 0xfd, 0xad, 0xf7, 0x1a, 0xd4, 0x8b, 0xcd,
|
0x63, 0xb7, 0x5b, 0x01, 0x3d, 0xab, 0xbf, 0xf5, 0x41, 0x83, 0x5a, 0xb1, 0xf9, 0xd0, 0x6b, 0x38,
|
||||||
0x87, 0x5e, 0xc1, 0xc1, 0x82, 0x0a, 0xe2, 0x11, 0x41, 0xb2, 0xe1, 0xfd, 0xf8, 0xc1, 0x2e, 0x51,
|
0x58, 0x50, 0x41, 0x3c, 0x22, 0x48, 0x36, 0xbc, 0x9f, 0x3e, 0xda, 0x25, 0x8a, 0x82, 0x37, 0x64,
|
||||||
0x14, 0xbc, 0x21, 0xa3, 0x33, 0xa8, 0x2d, 0xa8, 0x98, 0x33, 0xcf, 0x8d, 0xc8, 0x82, 0xca, 0x01,
|
0x74, 0x06, 0xd5, 0x05, 0x15, 0x73, 0xe6, 0xb9, 0x11, 0x59, 0x50, 0x39, 0xc0, 0x15, 0x0c, 0x0a,
|
||||||
0xae, 0x62, 0x50, 0xd0, 0x88, 0x2c, 0x28, 0x3a, 0x85, 0x2a, 0x59, 0x8a, 0x39, 0xe3, 0x81, 0x58,
|
0x1a, 0x91, 0x05, 0x45, 0xa7, 0x50, 0x21, 0x4b, 0x31, 0x67, 0x3c, 0x10, 0x6b, 0x39, 0xb6, 0x15,
|
||||||
0xcb, 0xb1, 0xad, 0xe2, 0x2d, 0x80, 0x2e, 0x40, 0x4f, 0x17, 0x01, 0x5b, 0x0a, 0x39, 0xae, 0xb5,
|
0x7c, 0x0f, 0xa0, 0x0b, 0xd0, 0xd3, 0x45, 0xc0, 0x96, 0x42, 0x8e, 0x6b, 0xb5, 0xfd, 0x62, 0x6b,
|
||||||
0xf6, 0xf3, 0x9d, 0x9d, 0x71, 0x95, 0x6d, 0x26, 0x9c, 0x33, 0xad, 0x3e, 0xd4, 0x8b, 0x1d, 0xff,
|
0x67, 0x5c, 0x65, 0x9b, 0x09, 0xe7, 0x4c, 0xab, 0x0f, 0xb5, 0x62, 0xc7, 0xff, 0xef, 0xcb, 0x5b,
|
||||||
0x9f, 0x0f, 0x6f, 0xfd, 0xa1, 0x81, 0x9e, 0x75, 0xf0, 0xff, 0xaa, 0x40, 0x22, 0x88, 0x58, 0x26,
|
0x7f, 0x68, 0xa0, 0x67, 0x1d, 0xfc, 0x51, 0x15, 0x48, 0x04, 0x11, 0xcb, 0xc4, 0x9d, 0x31, 0x4f,
|
||||||
0xee, 0x8c, 0x79, 0xaa, 0x02, 0x0d, 0x0c, 0x0a, 0xea, 0x31, 0x8f, 0xa2, 0xcf, 0xe0, 0x30, 0x23,
|
0x55, 0xa0, 0x8e, 0x41, 0x41, 0x3d, 0xe6, 0x51, 0xf4, 0x39, 0x1c, 0x66, 0x84, 0x7c, 0x0e, 0x55,
|
||||||
0xe4, 0x73, 0xa8, 0xca, 0xd0, 0x50, 0x68, 0x36, 0x7a, 0x05, 0x9a, 0x47, 0x05, 0x09, 0xc2, 0x44,
|
0x19, 0xea, 0x0a, 0xcd, 0x46, 0xaf, 0x40, 0xf3, 0xa8, 0x20, 0x41, 0x98, 0xc8, 0x8a, 0xd4, 0x72,
|
||||||
0x56, 0xa4, 0x9e, 0xd3, 0xae, 0x14, 0x68, 0xbd, 0x04, 0x3d, 0x8f, 0x78, 0x0a, 0x95, 0x90, 0x46,
|
0xda, 0x95, 0x02, 0xad, 0x57, 0xa0, 0xe7, 0x11, 0xcf, 0xa0, 0x1c, 0xd2, 0xc8, 0x17, 0x73, 0x79,
|
||||||
0xbe, 0x98, 0xcb, 0x03, 0x37, 0x70, 0x66, 0x21, 0x04, 0x65, 0x79, 0x8d, 0x3d, 0x19, 0x2f, 0xbf,
|
0xe1, 0x3a, 0xce, 0x2c, 0x84, 0xa0, 0x24, 0x9f, 0xb1, 0x2b, 0xe3, 0xe5, 0xd9, 0xea, 0xc2, 0x41,
|
||||||
0xad, 0x2e, 0x1c, 0xe4, 0x67, 0x47, 0x97, 0xb0, 0x4f, 0xd3, 0xcd, 0x65, 0x6a, 0xcd, 0x52, 0xab,
|
0x7e, 0x77, 0x74, 0x09, 0xfb, 0x34, 0xdd, 0x5c, 0xa6, 0xd6, 0xd8, 0x6b, 0x56, 0xdb, 0x8d, 0x7f,
|
||||||
0xd6, 0x6e, 0x7e, 0xe0, 0x9e, 0x72, 0xc3, 0x61, 0x45, 0xb7, 0x5e, 0x41, 0xe3, 0x1f, 0x38, 0x32,
|
0x79, 0xa7, 0xdc, 0x70, 0x58, 0xd1, 0xad, 0xd7, 0x50, 0xff, 0x07, 0x8e, 0x0c, 0xd8, 0xfb, 0x95,
|
||||||
0xa0, 0xf4, 0x2b, 0x5d, 0xcb, 0xec, 0x55, 0x9c, 0x7e, 0xa2, 0x63, 0xd8, 0x5f, 0x91, 0x70, 0x49,
|
0xae, 0x65, 0xf6, 0x0a, 0x4e, 0x8f, 0xe8, 0x18, 0xf6, 0x57, 0x24, 0x5c, 0xd2, 0x2c, 0xb7, 0x32,
|
||||||
0xb3, 0xdc, 0xca, 0xb0, 0xfe, 0xd4, 0x40, 0xcf, 0xe6, 0x18, 0x5d, 0x64, 0xdb, 0x59, 0x93, 0xcb,
|
0xac, 0x3f, 0x35, 0xd0, 0xb3, 0x39, 0x46, 0x17, 0xd9, 0x76, 0xd6, 0xe4, 0x72, 0x3d, 0x7b, 0x7a,
|
||||||
0xf5, 0xec, 0xf1, 0x89, 0xb7, 0x0b, 0x3b, 0xd9, 0x04, 0x9d, 0x28, 0x34, 0xeb, 0xb0, 0xdc, 0x4c,
|
0xe2, 0xed, 0xc2, 0x4e, 0x36, 0x41, 0x27, 0x0a, 0xcd, 0x3a, 0x2c, 0x37, 0xd3, 0x9f, 0x47, 0x10,
|
||||||
0x1f, 0x8f, 0x20, 0x76, 0x63, 0xc6, 0x85, 0xac, 0x6a, 0x03, 0x57, 0x82, 0x78, 0xcc, 0xb8, 0xb0,
|
0xbb, 0x31, 0xe3, 0x42, 0x56, 0xb5, 0x8e, 0xcb, 0x41, 0x3c, 0x66, 0x5c, 0x58, 0x0e, 0x94, 0xe4,
|
||||||
0x1c, 0x28, 0xcb, 0x1d, 0x61, 0x40, 0xfd, 0xde, 0x76, 0x68, 0x40, 0x55, 0x22, 0x83, 0xf1, 0xed,
|
0x8e, 0x30, 0xa0, 0xf6, 0x60, 0x3b, 0xd4, 0xa1, 0x22, 0x91, 0xc1, 0xf8, 0xf6, 0x6b, 0x43, 0x2b,
|
||||||
0xd7, 0x86, 0x56, 0x34, 0x2f, 0x8d, 0xbd, 0x8d, 0xf9, 0x76, 0x34, 0xf8, 0xc1, 0x28, 0x75, 0x7f,
|
0x9a, 0x97, 0xc6, 0xee, 0xc6, 0x7c, 0x37, 0x1a, 0xfc, 0x60, 0xec, 0x75, 0x7f, 0x86, 0xe3, 0x80,
|
||||||
0x86, 0xe3, 0x80, 0xed, 0x1e, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7,
|
0x6d, 0x5f, 0xb2, 0x7b, 0xd8, 0x95, 0xd6, 0x90, 0xf9, 0xe3, 0xb4, 0x51, 0xc7, 0xda, 0x4f, 0xed,
|
||||||
0xda, 0x4f, 0xed, 0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0x57, 0x4f, 0xf3, 0x87, 0x5e,
|
0xac, 0x71, 0x7d, 0x16, 0x92, 0xc8, 0xb7, 0x19, 0xf7, 0x5b, 0xf9, 0x7f, 0x59, 0x85, 0x49, 0xd3,
|
||||||
0xea, 0xbb, 0x8a, 0xec, 0xf2, 0x8b, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0xe7, 0xf6, 0x4b, 0x50,
|
0xdd, 0x98, 0xee, 0xea, 0xfc, 0xae, 0x2c, 0xbb, 0xfc, 0xe2, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff,
|
||||||
0xd4, 0x07, 0x00, 0x00,
|
0x10, 0x93, 0x68, 0x41, 0xc2, 0x07, 0x00, 0x00,
|
||||||
}
|
}
|
||||||
|
|
44
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
44
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
|
@ -68,8 +68,6 @@ var (
|
||||||
errConnDrain = errors.New("grpc: the connection is drained")
|
errConnDrain = errors.New("grpc: the connection is drained")
|
||||||
// errConnClosing indicates that the connection is closing.
|
// errConnClosing indicates that the connection is closing.
|
||||||
errConnClosing = errors.New("grpc: the connection is closing")
|
errConnClosing = errors.New("grpc: the connection is closing")
|
||||||
// errBalancerClosed indicates that the balancer is closed.
|
|
||||||
errBalancerClosed = errors.New("grpc: balancer is closed")
|
|
||||||
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
|
// invalidDefaultServiceConfigErrPrefix is used to prefix the json parsing error for the default
|
||||||
// service config.
|
// service config.
|
||||||
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
|
invalidDefaultServiceConfigErrPrefix = "grpc: the provided default service config is invalid"
|
||||||
|
@ -217,7 +215,14 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
||||||
defer func() {
|
defer func() {
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
|
switch {
|
||||||
|
case ctx.Err() == err:
|
||||||
|
conn = nil
|
||||||
|
case err == nil || !cc.dopts.returnLastError:
|
||||||
conn, err = nil, ctx.Err()
|
conn, err = nil, ctx.Err()
|
||||||
|
default:
|
||||||
|
conn, err = nil, fmt.Errorf("%v: %v", ctx.Err(), err)
|
||||||
|
}
|
||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
@ -311,7 +316,7 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
||||||
if s == connectivity.Ready {
|
if s == connectivity.Ready {
|
||||||
break
|
break
|
||||||
} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
|
} else if cc.dopts.copts.FailOnNonTempDialError && s == connectivity.TransientFailure {
|
||||||
if err = cc.blockingpicker.connectionError(); err != nil {
|
if err = cc.connectionError(); err != nil {
|
||||||
terr, ok := err.(interface {
|
terr, ok := err.(interface {
|
||||||
Temporary() bool
|
Temporary() bool
|
||||||
})
|
})
|
||||||
|
@ -322,6 +327,9 @@ func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *
|
||||||
}
|
}
|
||||||
if !cc.WaitForStateChange(ctx, s) {
|
if !cc.WaitForStateChange(ctx, s) {
|
||||||
// ctx got timeout or canceled.
|
// ctx got timeout or canceled.
|
||||||
|
if err = cc.connectionError(); err != nil && cc.dopts.returnLastError {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
return nil, ctx.Err()
|
return nil, ctx.Err()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -490,6 +498,9 @@ type ClientConn struct {
|
||||||
|
|
||||||
channelzID int64 // channelz unique identification number
|
channelzID int64 // channelz unique identification number
|
||||||
czData *channelzData
|
czData *channelzData
|
||||||
|
|
||||||
|
lceMu sync.Mutex // protects lastConnectionError
|
||||||
|
lastConnectionError error
|
||||||
}
|
}
|
||||||
|
|
||||||
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
|
// WaitForStateChange waits until the connectivity.State of ClientConn changes from sourceState or
|
||||||
|
@ -1199,7 +1210,7 @@ func (ac *addrConn) tryAllAddrs(addrs []resolver.Address, connectDeadline time.T
|
||||||
if firstConnErr == nil {
|
if firstConnErr == nil {
|
||||||
firstConnErr = err
|
firstConnErr = err
|
||||||
}
|
}
|
||||||
ac.cc.blockingpicker.updateConnectionError(err)
|
ac.cc.updateConnectionError(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Couldn't connect to any address.
|
// Couldn't connect to any address.
|
||||||
|
@ -1214,16 +1225,9 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
||||||
onCloseCalled := make(chan struct{})
|
onCloseCalled := make(chan struct{})
|
||||||
reconnect := grpcsync.NewEvent()
|
reconnect := grpcsync.NewEvent()
|
||||||
|
|
||||||
authority := ac.cc.authority
|
|
||||||
// addr.ServerName takes precedent over ClientConn authority, if present.
|
// addr.ServerName takes precedent over ClientConn authority, if present.
|
||||||
if addr.ServerName != "" {
|
if addr.ServerName == "" {
|
||||||
authority = addr.ServerName
|
addr.ServerName = ac.cc.authority
|
||||||
}
|
|
||||||
|
|
||||||
target := transport.TargetInfo{
|
|
||||||
Addr: addr.Addr,
|
|
||||||
Metadata: addr.Metadata,
|
|
||||||
Authority: authority,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
once := sync.Once{}
|
once := sync.Once{}
|
||||||
|
@ -1269,7 +1273,7 @@ func (ac *addrConn) createTransport(addr resolver.Address, copts transport.Conne
|
||||||
copts.ChannelzParentID = ac.channelzID
|
copts.ChannelzParentID = ac.channelzID
|
||||||
}
|
}
|
||||||
|
|
||||||
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, target, copts, onPrefaceReceipt, onGoAway, onClose)
|
newTr, err := transport.NewClientTransport(connectCtx, ac.cc.ctx, addr, copts, onPrefaceReceipt, onGoAway, onClose)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// newTr is either nil, or closed.
|
// newTr is either nil, or closed.
|
||||||
channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
|
channelz.Warningf(ac.channelzID, "grpc: addrConn.createTransport failed to connect to %v. Err: %v. Reconnecting...", addr, err)
|
||||||
|
@ -1532,3 +1536,15 @@ func (cc *ClientConn) getResolver(scheme string) resolver.Builder {
|
||||||
}
|
}
|
||||||
return resolver.Get(scheme)
|
return resolver.Get(scheme)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (cc *ClientConn) updateConnectionError(err error) {
|
||||||
|
cc.lceMu.Lock()
|
||||||
|
cc.lastConnectionError = err
|
||||||
|
cc.lceMu.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cc *ClientConn) connectionError() error {
|
||||||
|
cc.lceMu.Lock()
|
||||||
|
defer cc.lceMu.Unlock()
|
||||||
|
return cc.lastConnectionError
|
||||||
|
}
|
||||||
|
|
56
vendor/google.golang.org/grpc/credentials/credentials.go
generated
vendored
56
vendor/google.golang.org/grpc/credentials/credentials.go
generated
vendored
|
@ -29,6 +29,7 @@ import (
|
||||||
"net"
|
"net"
|
||||||
|
|
||||||
"github.com/golang/protobuf/proto"
|
"github.com/golang/protobuf/proto"
|
||||||
|
"google.golang.org/grpc/attributes"
|
||||||
"google.golang.org/grpc/internal"
|
"google.golang.org/grpc/internal"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -57,9 +58,11 @@ type PerRPCCredentials interface {
|
||||||
type SecurityLevel int
|
type SecurityLevel int
|
||||||
|
|
||||||
const (
|
const (
|
||||||
// NoSecurity indicates a connection is insecure.
|
// Invalid indicates an invalid security level.
|
||||||
// The zero SecurityLevel value is invalid for backward compatibility.
|
// The zero SecurityLevel value is invalid for backward compatibility.
|
||||||
NoSecurity SecurityLevel = iota + 1
|
Invalid SecurityLevel = iota
|
||||||
|
// NoSecurity indicates a connection is insecure.
|
||||||
|
NoSecurity
|
||||||
// IntegrityOnly indicates a connection only provides integrity protection.
|
// IntegrityOnly indicates a connection only provides integrity protection.
|
||||||
IntegrityOnly
|
IntegrityOnly
|
||||||
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
|
// PrivacyAndIntegrity indicates a connection provides both privacy and integrity protection.
|
||||||
|
@ -124,15 +127,18 @@ var ErrConnDispatched = errors.New("credentials: rawConn is dispatched out of gR
|
||||||
// TransportCredentials defines the common interface for all the live gRPC wire
|
// TransportCredentials defines the common interface for all the live gRPC wire
|
||||||
// protocols and supported transport security protocols (e.g., TLS, SSL).
|
// protocols and supported transport security protocols (e.g., TLS, SSL).
|
||||||
type TransportCredentials interface {
|
type TransportCredentials interface {
|
||||||
// ClientHandshake does the authentication handshake specified by the corresponding
|
// ClientHandshake does the authentication handshake specified by the
|
||||||
// authentication protocol on rawConn for clients. It returns the authenticated
|
// corresponding authentication protocol on rawConn for clients. It returns
|
||||||
// connection and the corresponding auth information about the connection.
|
// the authenticated connection and the corresponding auth information
|
||||||
// The auth information should embed CommonAuthInfo to return additional information about
|
// about the connection. The auth information should embed CommonAuthInfo
|
||||||
// the credentials. Implementations must use the provided context to implement timely cancellation.
|
// to return additional information about the credentials. Implementations
|
||||||
// gRPC will try to reconnect if the error returned is a temporary error
|
// must use the provided context to implement timely cancellation. gRPC
|
||||||
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true).
|
// will try to reconnect if the error returned is a temporary error
|
||||||
// If the returned error is a wrapper error, implementations should make sure that
|
// (io.EOF, context.DeadlineExceeded or err.Temporary() == true). If the
|
||||||
|
// returned error is a wrapper error, implementations should make sure that
|
||||||
// the error implements Temporary() to have the correct retry behaviors.
|
// the error implements Temporary() to have the correct retry behaviors.
|
||||||
|
// Additionally, ClientHandshakeInfo data will be available via the context
|
||||||
|
// passed to this call.
|
||||||
//
|
//
|
||||||
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
|
// If the returned net.Conn is closed, it MUST close the net.Conn provided.
|
||||||
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
|
ClientHandshake(context.Context, string, net.Conn) (net.Conn, AuthInfo, error)
|
||||||
|
@ -193,6 +199,31 @@ func RequestInfoFromContext(ctx context.Context) (ri RequestInfo, ok bool) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ClientHandshakeInfo holds data to be passed to ClientHandshake. This makes
|
||||||
|
// it possible to pass arbitrary data to the handshaker from gRPC, resolver,
|
||||||
|
// balancer etc. Individual credential implementations control the actual
|
||||||
|
// format of the data that they are willing to receive.
|
||||||
|
//
|
||||||
|
// This API is experimental.
|
||||||
|
type ClientHandshakeInfo struct {
|
||||||
|
// Attributes contains the attributes for the address. It could be provided
|
||||||
|
// by the gRPC, resolver, balancer etc.
|
||||||
|
Attributes *attributes.Attributes
|
||||||
|
}
|
||||||
|
|
||||||
|
// clientHandshakeInfoKey is a struct used as the key to store
|
||||||
|
// ClientHandshakeInfo in a context.
|
||||||
|
type clientHandshakeInfoKey struct{}
|
||||||
|
|
||||||
|
// ClientHandshakeInfoFromContext returns the ClientHandshakeInfo struct stored
|
||||||
|
// in ctx.
|
||||||
|
//
|
||||||
|
// This API is experimental.
|
||||||
|
func ClientHandshakeInfoFromContext(ctx context.Context) ClientHandshakeInfo {
|
||||||
|
chi, _ := ctx.Value(clientHandshakeInfoKey{}).(ClientHandshakeInfo)
|
||||||
|
return chi
|
||||||
|
}
|
||||||
|
|
||||||
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
|
// CheckSecurityLevel checks if a connection's security level is greater than or equal to the specified one.
|
||||||
// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
|
// It returns success if 1) the condition is satisified or 2) AuthInfo struct does not implement GetCommonAuthInfo() method
|
||||||
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
|
// or 3) CommonAuthInfo.SecurityLevel has an invalid zero value. For 2) and 3), it is for the purpose of backward-compatibility.
|
||||||
|
@ -208,7 +239,7 @@ func CheckSecurityLevel(ctx context.Context, level SecurityLevel) error {
|
||||||
}
|
}
|
||||||
if ci, ok := ri.AuthInfo.(internalInfo); ok {
|
if ci, ok := ri.AuthInfo.(internalInfo); ok {
|
||||||
// CommonAuthInfo.SecurityLevel has an invalid value.
|
// CommonAuthInfo.SecurityLevel has an invalid value.
|
||||||
if ci.GetCommonAuthInfo().SecurityLevel == 0 {
|
if ci.GetCommonAuthInfo().SecurityLevel == Invalid {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if ci.GetCommonAuthInfo().SecurityLevel < level {
|
if ci.GetCommonAuthInfo().SecurityLevel < level {
|
||||||
|
@ -223,6 +254,9 @@ func init() {
|
||||||
internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
|
internal.NewRequestInfoContext = func(ctx context.Context, ri RequestInfo) context.Context {
|
||||||
return context.WithValue(ctx, requestInfoKey{}, ri)
|
return context.WithValue(ctx, requestInfoKey{}, ri)
|
||||||
}
|
}
|
||||||
|
internal.NewClientHandshakeInfoContext = func(ctx context.Context, chi ClientHandshakeInfo) context.Context {
|
||||||
|
return context.WithValue(ctx, clientHandshakeInfoKey{}, chi)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ChannelzSecurityInfo defines the interface that security protocols should implement
|
// ChannelzSecurityInfo defines the interface that security protocols should implement
|
||||||
|
|
32
vendor/google.golang.org/grpc/dialoptions.go
generated
vendored
32
vendor/google.golang.org/grpc/dialoptions.go
generated
vendored
|
@ -50,14 +50,14 @@ type dialOptions struct {
|
||||||
dc Decompressor
|
dc Decompressor
|
||||||
bs internalbackoff.Strategy
|
bs internalbackoff.Strategy
|
||||||
block bool
|
block bool
|
||||||
|
returnLastError bool
|
||||||
insecure bool
|
insecure bool
|
||||||
timeout time.Duration
|
timeout time.Duration
|
||||||
scChan <-chan ServiceConfig
|
scChan <-chan ServiceConfig
|
||||||
authority string
|
authority string
|
||||||
copts transport.ConnectOptions
|
copts transport.ConnectOptions
|
||||||
callOptions []CallOption
|
callOptions []CallOption
|
||||||
// This is used by v1 balancer dial option WithBalancer to support v1
|
// This is used by WithBalancerName dial option.
|
||||||
// balancer, and also by WithBalancerName dial option.
|
|
||||||
balancerBuilder balancer.Builder
|
balancerBuilder balancer.Builder
|
||||||
channelzParentID int64
|
channelzParentID int64
|
||||||
disableServiceConfig bool
|
disableServiceConfig bool
|
||||||
|
@ -199,19 +199,6 @@ func WithDecompressor(dc Decompressor) DialOption {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithBalancer returns a DialOption which sets a load balancer with the v1 API.
|
|
||||||
// Name resolver will be ignored if this DialOption is specified.
|
|
||||||
//
|
|
||||||
// Deprecated: use the new balancer APIs in balancer package and
|
|
||||||
// WithBalancerName. Will be removed in a future 1.x release.
|
|
||||||
func WithBalancer(b Balancer) DialOption {
|
|
||||||
return newFuncDialOption(func(o *dialOptions) {
|
|
||||||
o.balancerBuilder = &balancerWrapperBuilder{
|
|
||||||
b: b,
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithBalancerName sets the balancer that the ClientConn will be initialized
|
// WithBalancerName sets the balancer that the ClientConn will be initialized
|
||||||
// with. Balancer registered with balancerName will be used. This function
|
// with. Balancer registered with balancerName will be used. This function
|
||||||
// panics if no balancer was registered by balancerName.
|
// panics if no balancer was registered by balancerName.
|
||||||
|
@ -299,6 +286,19 @@ func WithBlock() DialOption {
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithReturnConnectionError returns a DialOption which makes the client connection
|
||||||
|
// return a string containing both the last connection error that occurred and
|
||||||
|
// the context.DeadlineExceeded error.
|
||||||
|
// Implies WithBlock()
|
||||||
|
//
|
||||||
|
// This API is EXPERIMENTAL.
|
||||||
|
func WithReturnConnectionError() DialOption {
|
||||||
|
return newFuncDialOption(func(o *dialOptions) {
|
||||||
|
o.block = true
|
||||||
|
o.returnLastError = true
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
// WithInsecure returns a DialOption which disables transport security for this
|
// WithInsecure returns a DialOption which disables transport security for this
|
||||||
// ClientConn. Note that transport security is required unless WithInsecure is
|
// ClientConn. Note that transport security is required unless WithInsecure is
|
||||||
// set.
|
// set.
|
||||||
|
@ -459,7 +459,7 @@ func WithStreamInterceptor(f StreamClientInterceptor) DialOption {
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithChainStreamInterceptor returns a DialOption that specifies the chained
|
// WithChainStreamInterceptor returns a DialOption that specifies the chained
|
||||||
// interceptor for unary RPCs. The first interceptor will be the outer most,
|
// interceptor for streaming RPCs. The first interceptor will be the outer most,
|
||||||
// while the last interceptor will be the inner most wrapper around the real call.
|
// while the last interceptor will be the inner most wrapper around the real call.
|
||||||
// All interceptors added by this method will be chained, and the interceptor
|
// All interceptors added by this method will be chained, and the interceptor
|
||||||
// defined by WithStreamInterceptor will always be prepended to the chain.
|
// defined by WithStreamInterceptor will always be prepended to the chain.
|
||||||
|
|
2
vendor/google.golang.org/grpc/doc.go
generated
vendored
2
vendor/google.golang.org/grpc/doc.go
generated
vendored
|
@ -16,6 +16,8 @@
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
//go:generate ./regenerate.sh
|
||||||
|
|
||||||
/*
|
/*
|
||||||
Package grpc implements an RPC system called gRPC.
|
Package grpc implements an RPC system called gRPC.
|
||||||
|
|
||||||
|
|
3
vendor/google.golang.org/grpc/go.mod
generated
vendored
3
vendor/google.golang.org/grpc/go.mod
generated
vendored
|
@ -6,9 +6,8 @@ require (
|
||||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f
|
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f
|
||||||
github.com/envoyproxy/go-control-plane v0.9.4
|
github.com/envoyproxy/go-control-plane v0.9.4
|
||||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
|
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
|
||||||
github.com/golang/mock v1.1.1
|
|
||||||
github.com/golang/protobuf v1.3.3
|
github.com/golang/protobuf v1.3.3
|
||||||
github.com/google/go-cmp v0.2.0
|
github.com/google/go-cmp v0.4.0
|
||||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a
|
golang.org/x/net v0.0.0-20190311183353-d8887717615a
|
||||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
|
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be
|
||||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
|
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a
|
||||||
|
|
4
vendor/google.golang.org/grpc/go.sum
generated
vendored
4
vendor/google.golang.org/grpc/go.sum
generated
vendored
|
@ -23,6 +23,8 @@ github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I
|
||||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||||
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ=
|
||||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||||
|
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
|
||||||
|
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||||
|
@ -50,6 +52,8 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3
|
||||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135 h1:5Beo0mZN8dRzgrMMkDp0jc8YXQKx9DiJ2k1dkvGsn5A=
|
||||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
|
||||||
|
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||||
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
google.golang.org/appengine v1.4.0 h1:/wp5JvzpHIxhs/dumFmF7BXTf3Z+dd4uXta4kVyO508=
|
||||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||||
|
|
3
vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
generated
vendored
3
vendor/google.golang.org/grpc/internal/binarylog/binarylog.go
generated
vendored
|
@ -25,6 +25,7 @@ import (
|
||||||
"os"
|
"os"
|
||||||
|
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/internal/grpcutil"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Logger is the global binary logger. It can be used to get binary logger for
|
// Logger is the global binary logger. It can be used to get binary logger for
|
||||||
|
@ -146,7 +147,7 @@ func (l *logger) setBlacklist(method string) error {
|
||||||
// Each methodLogger returned by this method is a new instance. This is to
|
// Each methodLogger returned by this method is a new instance. This is to
|
||||||
// generate sequence id within the call.
|
// generate sequence id within the call.
|
||||||
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
|
func (l *logger) getMethodLogger(methodName string) *MethodLogger {
|
||||||
s, m, err := parseMethodName(methodName)
|
s, m, err := grpcutil.ParseMethod(methodName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
|
grpclog.Infof("binarylogging: failed to parse %q: %v", methodName, err)
|
||||||
return nil
|
return nil
|
||||||
|
|
33
vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
generated
vendored
33
vendor/google.golang.org/grpc/internal/binarylog/regenerate.sh
generated
vendored
|
@ -1,33 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
# Copyright 2018 gRPC authors.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
set -eux -o pipefail
|
|
||||||
|
|
||||||
TMP=$(mktemp -d)
|
|
||||||
|
|
||||||
function finish {
|
|
||||||
rm -rf "$TMP"
|
|
||||||
}
|
|
||||||
trap finish EXIT
|
|
||||||
|
|
||||||
pushd "$TMP"
|
|
||||||
mkdir -p grpc/binarylog/grpc_binarylog_v1
|
|
||||||
curl https://raw.githubusercontent.com/grpc/grpc-proto/master/grpc/binlog/v1/binarylog.proto > grpc/binarylog/grpc_binarylog_v1/binarylog.proto
|
|
||||||
|
|
||||||
protoc --go_out=plugins=grpc,paths=source_relative:. -I. grpc/binarylog/grpc_binarylog_v1/*.proto
|
|
||||||
popd
|
|
||||||
rm -f ./grpc_binarylog_v1/*.pb.go
|
|
||||||
cp "$TMP"/grpc/binarylog/grpc_binarylog_v1/*.pb.go ../../binarylog/grpc_binarylog_v1/
|
|
||||||
|
|
2
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
2
vendor/google.golang.org/grpc/internal/envconfig/envconfig.go
generated
vendored
|
@ -34,5 +34,5 @@ var (
|
||||||
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
|
// Retry is set if retry is explicitly enabled via "GRPC_GO_RETRY=on".
|
||||||
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
|
Retry = strings.EqualFold(os.Getenv(retryStr), "on")
|
||||||
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
// TXTErrIgnore is set if TXT errors should be ignored ("GRPC_GO_IGNORE_TXT_ERRORS" is not "false").
|
||||||
TXTErrIgnore = !strings.EqualFold(os.Getenv(retryStr), "false")
|
TXTErrIgnore = !strings.EqualFold(os.Getenv(txtErrIgnoreStr), "false")
|
||||||
)
|
)
|
||||||
|
|
|
@ -16,18 +16,17 @@
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
package binarylog
|
package grpcutil
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
|
|
||||||
// parseMethodName splits service and method from the input. It expects format
|
// ParseMethod splits service and method from the input. It expects format
|
||||||
// "/service/method".
|
// "/service/method".
|
||||||
//
|
//
|
||||||
// TODO: move to internal/grpcutil.
|
func ParseMethod(methodName string) (service, method string, _ error) {
|
||||||
func parseMethodName(methodName string) (service, method string, _ error) {
|
|
||||||
if !strings.HasPrefix(methodName, "/") {
|
if !strings.HasPrefix(methodName, "/") {
|
||||||
return "", "", errors.New("invalid method name: should start with /")
|
return "", "", errors.New("invalid method name: should start with /")
|
||||||
}
|
}
|
9
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
9
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
|
@ -25,6 +25,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"google.golang.org/grpc/connectivity"
|
"google.golang.org/grpc/connectivity"
|
||||||
|
"google.golang.org/grpc/serviceconfig"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -40,9 +41,17 @@ var (
|
||||||
// NewRequestInfoContext creates a new context based on the argument context attaching
|
// NewRequestInfoContext creates a new context based on the argument context attaching
|
||||||
// the passed in RequestInfo to the new context.
|
// the passed in RequestInfo to the new context.
|
||||||
NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
|
NewRequestInfoContext interface{} // func(context.Context, credentials.RequestInfo) context.Context
|
||||||
|
// NewClientHandshakeInfoContext returns a copy of the input context with
|
||||||
|
// the passed in ClientHandshakeInfo struct added to it.
|
||||||
|
NewClientHandshakeInfoContext interface{} // func(context.Context, credentials.ClientHandshakeInfo) context.Context
|
||||||
// ParseServiceConfigForTesting is for creating a fake
|
// ParseServiceConfigForTesting is for creating a fake
|
||||||
// ClientConn for resolver testing only
|
// ClientConn for resolver testing only
|
||||||
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
|
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
|
||||||
|
// EqualServiceConfigForTesting is for testing service config generation and
|
||||||
|
// parsing. Both a and b should be returned by ParseServiceConfigForTesting.
|
||||||
|
// This function compares the config without rawJSON stripped, in case the
|
||||||
|
// there's difference in white space.
|
||||||
|
EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
|
||||||
)
|
)
|
||||||
|
|
||||||
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
// HealthChecker defines the signature of the client-side LB channel health checking function.
|
||||||
|
|
11
vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
generated
vendored
11
vendor/google.golang.org/grpc/internal/resolver/dns/dns_resolver.go
generated
vendored
|
@ -32,6 +32,7 @@ import (
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
grpclbstate "google.golang.org/grpc/balancer/grpclb/state"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
"google.golang.org/grpc/internal/envconfig"
|
"google.golang.org/grpc/internal/envconfig"
|
||||||
"google.golang.org/grpc/internal/grpcrand"
|
"google.golang.org/grpc/internal/grpcrand"
|
||||||
|
@ -251,7 +252,7 @@ func (d *dnsResolver) lookupSRV() ([]resolver.Address, error) {
|
||||||
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
|
return nil, fmt.Errorf("dns: error parsing A record IP address %v", a)
|
||||||
}
|
}
|
||||||
addr := ip + ":" + strconv.Itoa(int(s.Port))
|
addr := ip + ":" + strconv.Itoa(int(s.Port))
|
||||||
newAddrs = append(newAddrs, resolver.Address{Addr: addr, Type: resolver.GRPCLB, ServerName: s.Target})
|
newAddrs = append(newAddrs, resolver.Address{Addr: addr, ServerName: s.Target})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return newAddrs, nil
|
return newAddrs, nil
|
||||||
|
@ -326,13 +327,15 @@ func (d *dnsResolver) lookup() (*resolver.State, error) {
|
||||||
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
|
if hostErr != nil && (srvErr != nil || len(srv) == 0) {
|
||||||
return nil, hostErr
|
return nil, hostErr
|
||||||
}
|
}
|
||||||
state := &resolver.State{
|
|
||||||
Addresses: append(addrs, srv...),
|
state := resolver.State{Addresses: addrs}
|
||||||
|
if len(srv) > 0 {
|
||||||
|
state = grpclbstate.Set(state, &grpclbstate.State{BalancerAddresses: srv})
|
||||||
}
|
}
|
||||||
if !d.disableServiceConfig {
|
if !d.disableServiceConfig {
|
||||||
state.ServiceConfig = d.lookupTXT()
|
state.ServiceConfig = d.lookupTXT()
|
||||||
}
|
}
|
||||||
return state, nil
|
return &state, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
// formatIP returns ok = false if addr is not a valid textual representation of an IP address.
|
||||||
|
|
90
vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
generated
vendored
Normal file
90
vendor/google.golang.org/grpc/internal/serviceconfig/serviceconfig.go
generated
vendored
Normal file
|
@ -0,0 +1,90 @@
|
||||||
|
/*
|
||||||
|
*
|
||||||
|
* Copyright 2020 gRPC authors.
|
||||||
|
*
|
||||||
|
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
* you may not use this file except in compliance with the License.
|
||||||
|
* You may obtain a copy of the License at
|
||||||
|
*
|
||||||
|
* http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
*
|
||||||
|
* Unless required by applicable law or agreed to in writing, software
|
||||||
|
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
* See the License for the specific language governing permissions and
|
||||||
|
* limitations under the License.
|
||||||
|
*
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package serviceconfig contains utility functions to parse service config.
|
||||||
|
package serviceconfig
|
||||||
|
|
||||||
|
import (
|
||||||
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
|
||||||
|
"google.golang.org/grpc/balancer"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
externalserviceconfig "google.golang.org/grpc/serviceconfig"
|
||||||
|
)
|
||||||
|
|
||||||
|
// BalancerConfig is the balancer config part that service config's
|
||||||
|
// loadBalancingConfig fields can be unmarshalled to. It's a json unmarshaller.
|
||||||
|
//
|
||||||
|
// https://github.com/grpc/grpc-proto/blob/54713b1e8bc6ed2d4f25fb4dff527842150b91b2/grpc/service_config/service_config.proto#L247
|
||||||
|
type BalancerConfig struct {
|
||||||
|
Name string
|
||||||
|
Config externalserviceconfig.LoadBalancingConfig
|
||||||
|
}
|
||||||
|
|
||||||
|
type intermediateBalancerConfig []map[string]json.RawMessage
|
||||||
|
|
||||||
|
// UnmarshalJSON implements json unmarshaller.
|
||||||
|
func (bc *BalancerConfig) UnmarshalJSON(b []byte) error {
|
||||||
|
var ir intermediateBalancerConfig
|
||||||
|
err := json.Unmarshal(b, &ir)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, lbcfg := range ir {
|
||||||
|
if len(lbcfg) != 1 {
|
||||||
|
return fmt.Errorf("invalid loadBalancingConfig: entry %v does not contain exactly 1 policy/config pair: %q", i, lbcfg)
|
||||||
|
}
|
||||||
|
var (
|
||||||
|
name string
|
||||||
|
jsonCfg json.RawMessage
|
||||||
|
)
|
||||||
|
// Get the key:value pair from the map.
|
||||||
|
for name, jsonCfg = range lbcfg {
|
||||||
|
}
|
||||||
|
builder := balancer.Get(name)
|
||||||
|
if builder == nil {
|
||||||
|
// If the balancer is not registered, move on to the next config.
|
||||||
|
// This is not an error.
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
bc.Name = name
|
||||||
|
|
||||||
|
parser, ok := builder.(balancer.ConfigParser)
|
||||||
|
if !ok {
|
||||||
|
if string(jsonCfg) != "{}" {
|
||||||
|
grpclog.Warningf("non-empty balancer configuration %q, but balancer does not implement ParseConfig", string(jsonCfg))
|
||||||
|
}
|
||||||
|
// Stop at this, though the builder doesn't support parsing config.
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
cfg, err := parser.ParseConfig(jsonCfg)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error parsing loadBalancingConfig for policy %q: %v", name, err)
|
||||||
|
}
|
||||||
|
bc.Config = cfg
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// This is reached when the for loop iterates over all entries, but didn't
|
||||||
|
// return. This means we had a loadBalancingConfig slice but did not
|
||||||
|
// encounter a registered policy. The config is considered invalid in this
|
||||||
|
// case.
|
||||||
|
return fmt.Errorf("invalid loadBalancingConfig: no supported policies found")
|
||||||
|
}
|
23
vendor/google.golang.org/grpc/internal/status/status.go
generated
vendored
23
vendor/google.golang.org/grpc/internal/status/status.go
generated
vendored
|
@ -97,7 +97,7 @@ func (s *Status) Err() error {
|
||||||
if s.Code() == codes.OK {
|
if s.Code() == codes.OK {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return (*Error)(s.Proto())
|
return &Error{e: s.Proto()}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithDetails returns a new status with the provided details messages appended to the status.
|
// WithDetails returns a new status with the provided details messages appended to the status.
|
||||||
|
@ -136,26 +136,27 @@ func (s *Status) Details() []interface{} {
|
||||||
return details
|
return details
|
||||||
}
|
}
|
||||||
|
|
||||||
// Error is an alias of a status proto. It implements error and Status,
|
// Error wraps a pointer of a status proto. It implements error and Status,
|
||||||
// and a nil Error should never be returned by this package.
|
// and a nil *Error should never be returned by this package.
|
||||||
type Error spb.Status
|
type Error struct {
|
||||||
|
e *spb.Status
|
||||||
|
}
|
||||||
|
|
||||||
func (se *Error) Error() string {
|
func (e *Error) Error() string {
|
||||||
p := (*spb.Status)(se)
|
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(e.e.GetCode()), e.e.GetMessage())
|
||||||
return fmt.Sprintf("rpc error: code = %s desc = %s", codes.Code(p.GetCode()), p.GetMessage())
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// GRPCStatus returns the Status represented by se.
|
// GRPCStatus returns the Status represented by se.
|
||||||
func (se *Error) GRPCStatus() *Status {
|
func (e *Error) GRPCStatus() *Status {
|
||||||
return FromProto((*spb.Status)(se))
|
return FromProto(e.e)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Is implements future error.Is functionality.
|
// Is implements future error.Is functionality.
|
||||||
// A Error is equivalent if the code and message are identical.
|
// A Error is equivalent if the code and message are identical.
|
||||||
func (se *Error) Is(target error) bool {
|
func (e *Error) Is(target error) bool {
|
||||||
tse, ok := target.(*Error)
|
tse, ok := target.(*Error)
|
||||||
if !ok {
|
if !ok {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
return proto.Equal((*spb.Status)(se), (*spb.Status)(tse))
|
return proto.Equal(e.e, tse.e)
|
||||||
}
|
}
|
||||||
|
|
2
vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
generated
vendored
2
vendor/google.golang.org/grpc/internal/syscall/syscall_nonlinux.go
generated
vendored
|
@ -18,6 +18,8 @@
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
// Package syscall provides functionalities that grpc uses to get low-level
|
||||||
|
// operating system stats/info.
|
||||||
package syscall
|
package syscall
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
|
62
vendor/google.golang.org/grpc/internal/transport/controlbuf.go
generated
vendored
62
vendor/google.golang.org/grpc/internal/transport/controlbuf.go
generated
vendored
|
@ -857,53 +857,56 @@ func (l *loopyWriter) processData() (bool, error) {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
var (
|
var (
|
||||||
idx int
|
|
||||||
buf []byte
|
buf []byte
|
||||||
)
|
)
|
||||||
if len(dataItem.h) != 0 { // data header has not been written out yet.
|
// Figure out the maximum size we can send
|
||||||
buf = dataItem.h
|
maxSize := http2MaxFrameLen
|
||||||
} else {
|
|
||||||
idx = 1
|
|
||||||
buf = dataItem.d
|
|
||||||
}
|
|
||||||
size := http2MaxFrameLen
|
|
||||||
if len(buf) < size {
|
|
||||||
size = len(buf)
|
|
||||||
}
|
|
||||||
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
|
if strQuota := int(l.oiws) - str.bytesOutStanding; strQuota <= 0 { // stream-level flow control.
|
||||||
str.state = waitingOnStreamQuota
|
str.state = waitingOnStreamQuota
|
||||||
return false, nil
|
return false, nil
|
||||||
} else if strQuota < size {
|
} else if maxSize > strQuota {
|
||||||
size = strQuota
|
maxSize = strQuota
|
||||||
|
}
|
||||||
|
if maxSize > int(l.sendQuota) { // connection-level flow control.
|
||||||
|
maxSize = int(l.sendQuota)
|
||||||
|
}
|
||||||
|
// Compute how much of the header and data we can send within quota and max frame length
|
||||||
|
hSize := min(maxSize, len(dataItem.h))
|
||||||
|
dSize := min(maxSize-hSize, len(dataItem.d))
|
||||||
|
if hSize != 0 {
|
||||||
|
if dSize == 0 {
|
||||||
|
buf = dataItem.h
|
||||||
|
} else {
|
||||||
|
// We can add some data to grpc message header to distribute bytes more equally across frames.
|
||||||
|
// Copy on the stack to avoid generating garbage
|
||||||
|
var localBuf [http2MaxFrameLen]byte
|
||||||
|
copy(localBuf[:hSize], dataItem.h)
|
||||||
|
copy(localBuf[hSize:], dataItem.d[:dSize])
|
||||||
|
buf = localBuf[:hSize+dSize]
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
buf = dataItem.d
|
||||||
}
|
}
|
||||||
|
|
||||||
if l.sendQuota < uint32(size) { // connection-level flow control.
|
size := hSize + dSize
|
||||||
size = int(l.sendQuota)
|
|
||||||
}
|
|
||||||
// Now that outgoing flow controls are checked we can replenish str's write quota
|
// Now that outgoing flow controls are checked we can replenish str's write quota
|
||||||
str.wq.replenish(size)
|
str.wq.replenish(size)
|
||||||
var endStream bool
|
var endStream bool
|
||||||
// If this is the last data message on this stream and all of it can be written in this iteration.
|
// If this is the last data message on this stream and all of it can be written in this iteration.
|
||||||
if dataItem.endStream && size == len(buf) {
|
if dataItem.endStream && len(dataItem.h)+len(dataItem.d) <= size {
|
||||||
// buf contains either data or it contains header but data is empty.
|
|
||||||
if idx == 1 || len(dataItem.d) == 0 {
|
|
||||||
endStream = true
|
endStream = true
|
||||||
}
|
}
|
||||||
}
|
|
||||||
if dataItem.onEachWrite != nil {
|
if dataItem.onEachWrite != nil {
|
||||||
dataItem.onEachWrite()
|
dataItem.onEachWrite()
|
||||||
}
|
}
|
||||||
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
|
if err := l.framer.fr.WriteData(dataItem.streamID, endStream, buf[:size]); err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
buf = buf[size:]
|
|
||||||
str.bytesOutStanding += size
|
str.bytesOutStanding += size
|
||||||
l.sendQuota -= uint32(size)
|
l.sendQuota -= uint32(size)
|
||||||
if idx == 0 {
|
dataItem.h = dataItem.h[hSize:]
|
||||||
dataItem.h = buf
|
dataItem.d = dataItem.d[dSize:]
|
||||||
} else {
|
|
||||||
dataItem.d = buf
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
|
if len(dataItem.h) == 0 && len(dataItem.d) == 0 { // All the data from that message was written out.
|
||||||
str.itl.dequeue()
|
str.itl.dequeue()
|
||||||
|
@ -924,3 +927,10 @@ func (l *loopyWriter) processData() (bool, error) {
|
||||||
}
|
}
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func min(a, b int) int {
|
||||||
|
if a < b {
|
||||||
|
return a
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
Some files were not shown because too many files have changed in this diff Show more
Loading…
Reference in a new issue