diff --git a/go.mod b/go.mod
index 2dfd2089c11..b15e969073b 100644
--- a/go.mod
+++ b/go.mod
@@ -50,4 +50,5 @@ replace (
k8s.io/apimachinery => k8s.io/apimachinery v0.17.6
k8s.io/client-go => k8s.io/client-go v0.17.6
k8s.io/code-generator => k8s.io/code-generator v0.17.6
+ knative.dev/pkg => github.com/slinkydeveloper/pkg v0.0.0-20200724072217-fc14798189e0
)
diff --git a/go.sum b/go.sum
index e3c205531c3..588158e7973 100644
--- a/go.sum
+++ b/go.sum
@@ -1,8 +1,6 @@
bazil.org/fuse v0.0.0-20160811212531-371fbbdaa898/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8=
-cloud.google.com/go v0.25.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
-cloud.google.com/go v0.30.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts=
@@ -20,7 +18,6 @@ cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6T
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
-cloud.google.com/go v0.55.0/go.mod h1:ZHmoY+/lIMNkN2+fBmuTiqZ4inFhvQad8ft7MT8IV5Y=
cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk=
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
cloud.google.com/go v0.60.0 h1:R+tDlceO7Ss+zyvtsdhTxacDyZ1k99xwskQ4FT7ruoM=
@@ -47,6 +44,7 @@ cloud.google.com/go/pubsub v1.2.0 h1:Lpy6hKgdcl7a3WGSfJIFmxmcdjSpP6OmBEfcOv1Y680
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU=
cloud.google.com/go/pubsub v1.4.0/go.mod h1:LFrqilwgdw4X2cJS9ALgzYmMu+ULyrUN6IHV3CPK4TM=
+cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w=
cloud.google.com/go/pubsub v1.6.1/go.mod h1:kvW9rcn9OLEx6eTIzMBbWbpB8YsK3vu9jxgPolVz+p4=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
@@ -70,7 +68,6 @@ contrib.go.opencensus.io/exporter/prometheus v0.2.1-0.20200609204449-6bcf6f8577f
contrib.go.opencensus.io/exporter/prometheus v0.2.1-0.20200609204449-6bcf6f8577f0/go.mod h1:MjHoxkI7Ny27toPeFkRbXbzVjzIGkwOAptrAy8Mxtm8=
contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw=
contrib.go.opencensus.io/exporter/stackdriver v0.12.8/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
-contrib.go.opencensus.io/exporter/stackdriver v0.12.9-0.20191108183826-59d068f8d8ff/go.mod h1:XyyafDnFOsqoxHJgTFycKZMrRUrPThLh2iYTJF6uoO0=
contrib.go.opencensus.io/exporter/stackdriver v0.13.1/go.mod h1:z2tyTZtPmQ2HvWH4cOmVDgtY+1lomfKdbLnkJvZdc8c=
contrib.go.opencensus.io/exporter/stackdriver v0.13.2 h1:5lKLBwUuq4S6pTbYaBtWmnay3eJfKNS3qL8M8HM5fM4=
contrib.go.opencensus.io/exporter/stackdriver v0.13.2/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc=
@@ -79,32 +76,22 @@ contrib.go.opencensus.io/exporter/zipkin v0.1.1/go.mod h1:GMvdSl3eJ2gapOaLKzTKE3
contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE=
contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU=
-github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
-github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg=
github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4=
github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc=
github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v19.1.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v21.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
-github.com/Azure/azure-sdk-for-go v28.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v35.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v38.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-sdk-for-go v42.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
-github.com/Azure/azure-storage-blob-go v0.0.0-20190123011202-457680cc0804/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y=
github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v10.15.5+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest v11.1.2+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest v14.1.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
-github.com/Azure/go-autorest/autorest v0.1.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg=
github.com/Azure/go-autorest/autorest v0.2.0/go.mod h1:AKyIcETwSUFxIcs/Wnq/C+kwCtlEYGUVd7FPNb2slmg=
github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI=
github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0=
@@ -123,7 +110,6 @@ github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+v
github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0=
github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM=
-github.com/Azure/go-autorest/autorest/to v0.1.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
github.com/Azure/go-autorest/autorest/to v0.2.0/go.mod h1:GunWKJp1AEqgMaGLV+iocmRAJWqST1wQYhyyjXJ3SJc=
github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA=
github.com/Azure/go-autorest/autorest/validation v0.1.0/go.mod h1:Ha3z/SqBeaalWQvokg3NZAlQTalVMtOIAs1aGK7G6u8=
@@ -142,7 +128,6 @@ github.com/Djarvur/go-err113 v0.1.0/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpI
github.com/GoogleCloudPlatform/cloud-builders/gcs-fetcher v0.0.0-20191203181535-308b93ad1f39/go.mod h1:yfGmCjKuUzk9WzubMlW2zwjhCraIc/J+M40cufdemRM=
github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo=
github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534/go.mod h1:iroGtC8B3tQiqtds1l+mgk/BBOrxbqjH+eUfFQYRc14=
-github.com/GoogleCloudPlatform/testgrid v0.0.1-alpha.3/go.mod h1:f96W2HYy3tiBNV5zbbRc+NczwYHgG1PHXMQfoEWv680=
github.com/GoogleCloudPlatform/testgrid v0.0.7/go.mod h1:lmtHGBL0M/MLbu1tR9BWV7FGZ1FEFIdPqmJiHNCL7y8=
github.com/GoogleCloudPlatform/testgrid v0.0.13/go.mod h1:UlC/MvnkKjiVGijIKOHxnVyhDiTDCydw9H1XzmclQGU=
github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
@@ -200,24 +185,17 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkY
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20200108200545-475eaeb16496/go.mod h1:oGkLhpf+kjZl6xBf758TQhh5XrAeiJv/7FRz/2spLIg=
-github.com/aws/aws-k8s-tester v0.0.0-20190114231546-b411acf57dfe/go.mod h1:1ADF5tAtU1/mVtfMcHAYSm2fPw71DA7fFk0yed64/0I=
-github.com/aws/aws-k8s-tester v0.9.3/go.mod h1:nsh1f7joi8ZI1lvR+Ron6kJM2QdCYPU/vFePghSSuTc=
github.com/aws/aws-k8s-tester v1.0.0/go.mod h1:NUNd9k43+h9O5tvwL+4N1Ctb//SapmeeFX1G0/2/0Qc=
github.com/aws/aws-sdk-go v1.15.11/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0=
-github.com/aws/aws-sdk-go v1.15.90/go.mod h1:es1KtYUFs7le0xQ3rOihkuoVD90z7D0fR2Qm4S00/gU=
-github.com/aws/aws-sdk-go v1.16.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.23.20 h1:2CBuL21P0yKdZN5urf2NxKa1ha8fhnY+A3pBCHFeZoA=
github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.23.22/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
-github.com/aws/aws-sdk-go v1.29.32/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
-github.com/aws/aws-sdk-go v1.29.34/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg=
github.com/aws/aws-sdk-go v1.30.4/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.30.5 h1:i+sSesaMrSxiUt3NJddOApe2mXK+VNBgfcmRTvNFrXM=
github.com/aws/aws-sdk-go v1.30.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
@@ -268,7 +246,6 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
github.com/cihub/seelog v0.0.0-20170130134532-f561c5e57575/go.mod h1:9d6lWj8KzO/fd/NrVaLscBKmPigpZpn5YawRPw+e3Yo=
github.com/clarketm/json v1.13.4/go.mod h1:ynr2LRfb0fQU34l07csRNBTcivjySLLiY1YzQqKVfdo=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
-github.com/cloudevents/sdk-go v0.0.0-20190509003705-56931988abe3/go.mod h1:j1nZWMLGg3om8SswStBoY6/SHvcLM19MuZqwDtMtmzs=
github.com/cloudevents/sdk-go v1.0.0 h1:gS5I0s2qPmdc4GBPlUmzZU7RH30BaiOdcRJ1RkXnPrc=
github.com/cloudevents/sdk-go v1.0.0/go.mod h1:3TkmM0cFqkhCHOq5JzzRU/RxRkwzoS8TZ+G448qVTog=
github.com/cloudevents/sdk-go/v2 v2.0.0/go.mod h1:3CTrpB4+u7Iaj6fd7E2Xvm5IxMdRoaAhqaRVnOr2rCU=
@@ -311,7 +288,6 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
github.com/deislabs/oras v0.8.1/go.mod h1:Mx0rMSbBNaNfY9hjpccEnxkOqJL6KGjtxNHPLC4G4As=
-github.com/denisenkom/go-mssqldb v0.0.0-20190111225525-2fea367d496d/go.mod h1:xN/JuLBIz4bjkxNmByTiV1IbhfnYb6oo99phBn4Eqhc=
github.com/denisenkom/go-mssqldb v0.0.0-20191124224453-732737034ffd/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
@@ -322,16 +298,13 @@ github.com/dgryski/go-gk v0.0.0-20200319235926-a69029f61654/go.mod h1:qm+vckxRlD
github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
github.com/djherbis/atime v1.0.0/go.mod h1:5W+KBIuTwVGcqjIfaTwt+KSYX1o6uep8dtevevQP/f8=
github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/cli v0.0.0-20190925022749-754388324470/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v0.0.0-20200130152716-5d0cf8839492/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v0.0.0-20200210162036-a4bedce16568/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v0.0.0-20191216044856-a8371794149d/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
-github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/docker v1.4.2-0.20180531152204-71cd53e4a197/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v1.13.1 h1:IkZjBSIc8hBjLpqeAbeE5mca5mNgeatLHBy3GO78BWo=
@@ -345,6 +318,7 @@ github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
+github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/dsnet/compress v0.0.1/go.mod h1:Aw8dCMJ7RioblQeTqt88akK31OvO8Dhf5JflhBbQEHo=
@@ -355,6 +329,7 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m
github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
+github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e h1:p1yVGRW3nmb85p1Sh1ZJSDm4A4iKLS5QNbvUHMgGu/M=
github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/emicklei/go-restful v2.9.5+incompatible h1:spTtZBk5DYEvbxMVutUuTyh1Ao2r4iyvLdACqsl/Ljk=
@@ -398,7 +373,6 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
-github.com/go-ini/ini v1.46.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-ini/ini v1.55.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
@@ -466,7 +440,6 @@ github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfT
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
-github.com/go-sql-driver/mysql v0.0.0-20160411075031-7ebe0a500653/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@@ -487,9 +460,7 @@ github.com/go-toolsmith/strparse v1.0.0/go.mod h1:YI2nUKP9YGZnL/L1/DLFBfixrcjslW
github.com/go-toolsmith/typep v1.0.0/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
github.com/go-toolsmith/typep v1.0.2/go.mod h1:JSQCQMUPdRlMZFswiq3TGpNp1GMktqkR2Ns5AIQkATU=
github.com/go-xmlfmt/xmlfmt v0.0.0-20191208150333-d5b6f63a941b/go.mod h1:aUCEOzzezBEjDBbFBoSiya/gduyIiWYRP6CnSFIV8AM=
-github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0=
github.com/gobuffalo/envy v1.6.5/go.mod h1:N+GkhhZ/93bGZc6ZKhJLP6+m+tCNPKwgSpH9kaifseQ=
-github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI=
github.com/gobuffalo/envy v1.7.1 h1:OQl5ys5MBea7OGCdvPbBJWRgnhC/fGona6QKfvFeau8=
github.com/gobuffalo/envy v1.7.1/go.mod h1:FurDp9+EDPE4aIUS3ZLyD+7/9fpx7YRt/ukY6jIHf0w=
github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
@@ -497,7 +468,6 @@ github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6
github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
-github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
@@ -518,7 +488,6 @@ github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4er
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
-github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -566,7 +535,6 @@ github.com/golangplus/bytes v0.0.0-20160111154220-45c989fe5450/go.mod h1:Bk6SMAO
github.com/golangplus/fmt v0.0.0-20150411045040-2a5d6d7d2995/go.mod h1:lJgMEyOkYFkPcDKwRXegd+iM6E7matEszMG5HhwytU8=
github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
github.com/gomodule/redigo v1.7.0/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4=
-github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
@@ -580,9 +548,7 @@ github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.1 h1:JFrFEBb2xKufg6XkJsJr+WbKb4FQlURi5RUcBveYu9k=
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-containerregistry v0.0.0-20191010200024-a3d713f9b7f8/go.mod h1:KyKXa9ciM8+lgMXwOVsXi7UxGrsf9mM61Mzs+xKUrKE=
github.com/google/go-containerregistry v0.0.0-20200115214256-379933c9c22b/go.mod h1:Wtl/v6YdQxv397EREtzwgd9+Ud7Q5D8XMbi3Zazgkrs=
-github.com/google/go-containerregistry v0.0.0-20200123184029-53ce695e4179/go.mod h1:Wtl/v6YdQxv397EREtzwgd9+Ud7Q5D8XMbi3Zazgkrs=
github.com/google/go-containerregistry v0.0.0-20200331213917-3d03ed9b1ca2/go.mod h1:pD1UFYs7MCAx+ZLShBdttcaOSbyc8F9Na/9IZLNwJeA=
github.com/google/go-containerregistry v0.1.1/go.mod h1:npTSyywOeILcgWqd+rvtzGWflIPPcBQhYoOONaY4ltM=
github.com/google/go-github v17.0.0+incompatible h1:N0LgJ1j65A7kfXrZnUDaYCs/Sf4rEjNlfyDHW9dolSY=
@@ -598,13 +564,11 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO
github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE=
github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
-github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/licenseclassifier v0.0.0-20190926221455-842c0d70d702/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
-github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
github.com/google/licenseclassifier v0.0.0-20200708223521-3d09a0ea2f39/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M=
github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3 h1:/o5e44nTD/QEEiWPGSFT3bSqcq3Qg7q27N9bv4gKh5M=
github.com/google/mako v0.0.0-20190821191249-122f8dcef9e3/go.mod h1:YzLcVlL+NqWnmUEPuhS1LxDDwGO9WNbVlEXaF4IH35g=
@@ -626,12 +590,10 @@ github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm4
github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg=
github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
-github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s=
github.com/google/wire v0.4.0/go.mod h1:ngWDr9Qvq3yZA10YrxfyGELY/AFWGVpy9c1LTRi1EoU=
-github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww=
github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -663,7 +625,6 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z
github.com/gorilla/mux v1.7.4 h1:VuZ8uybHlWmqV03+zRzdwKL4tUnIp1MAQtp1mIFE1bc=
github.com/gorilla/mux v1.7.4/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4=
-github.com/gorilla/sessions v1.1.3/go.mod h1:8KCfur6+4Mqcc6S0FEfKuN15Vl5MgXW92AE8ovaJD0w=
github.com/gorilla/sessions v1.2.0/go.mod h1:dk2InVEVJ0sfLlnXv9EAgkf6ecYs/i80K/zI+bUmuGM=
github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
@@ -671,14 +632,12 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/gostaticanalysis/analysisutil v0.0.0-20190318220348-4088753ea4d3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
github.com/gostaticanalysis/analysisutil v0.0.3/go.mod h1:eEOZF4jCKGi+aprrirO9e7WKB3beBRtWgqGunKl6pKE=
github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
-github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc h1:f8eY6cV/x1x+HLjOp4r72s/31/V2aTUtg5oKRRPf8/Q=
github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
-github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE=
github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
@@ -686,7 +645,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.9.4/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
-github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
github.com/grpc-ecosystem/grpc-gateway v1.12.2/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c=
github.com/grpc-ecosystem/grpc-gateway v1.14.6 h1:8ERzHx8aj1Sc47mu9n/AksaKCSWrMchFtkdrS4BIj5o=
github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
@@ -702,7 +660,6 @@ github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v0.0.0-20161216184304-ed905158d874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
-github.com/hashicorp/go-multierror v0.0.0-20171204182908-b7773ae21874/go.mod h1:JMRHfdO9jKNzS/+BTlxCjKNQHg/jZAft8U7LloJvN7I=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY=
@@ -750,14 +707,11 @@ github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
-github.com/jenkins-x/go-scm v1.5.65/go.mod h1:MgGRkJScE/rJ30J/bXYqduN5sDPZqZFITJopsnZmTOw=
github.com/jenkins-x/go-scm v1.5.79/go.mod h1:PCT338UhP/pQ0IeEeMEf/hoLTYKcH7qjGEKd7jPkeYg=
github.com/jenkins-x/go-scm v1.5.117/go.mod h1:PCT338UhP/pQ0IeEeMEf/hoLTYKcH7qjGEKd7jPkeYg=
github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jingyugao/rowserrcheck v0.0.0-20191204022205-72ab7603b68a/go.mod h1:xRskid8CManxVta/ALEhJha/pweKBaVG6fWgc0yH25s=
-github.com/jinzhu/gorm v0.0.0-20170316141641-572d0a0ab1eb/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
github.com/jinzhu/gorm v1.9.12/go.mod h1:vhTjlKSJUTWNtcbQtrMBFCxy7eXTzeCAzfL5fBZT/Qs=
-github.com/jinzhu/inflection v0.0.0-20190603042836-f5c5f50e6090/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
github.com/jinzhu/now v1.0.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
@@ -776,8 +730,6 @@ github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqx
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0=
github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -789,7 +741,6 @@ github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/X
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
-github.com/kelseyhightower/envconfig v1.3.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kelseyhightower/envconfig v1.4.0 h1:Im6hONhd3pLkfDFsbRgu68RDNkGF1r3dvMUtDTo2cv8=
github.com/kelseyhightower/envconfig v1.4.0/go.mod h1:cccZRl6mQpaq41TPp5QxidR+Sa3axMbJDNb//FQX6Gg=
github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
@@ -802,10 +753,7 @@ github.com/klauspost/compress v1.9.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.10.2/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
-github.com/klauspost/cpuid v1.2.2/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
-github.com/knative/build v0.1.2/go.mod h1:/sU74ZQkwlYA5FwYDJhYTy61i/Kn+5eWfln2jDbw3Qo=
-github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
@@ -815,7 +763,6 @@ github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
-github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
@@ -868,13 +815,11 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/mattn/go-shellwords v1.0.9/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
-github.com/mattn/go-sqlite3 v0.0.0-20160514122348-38ee283dabf1/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-sqlite3 v2.0.1+incompatible/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/mattn/go-zglob v0.0.2/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
-github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
@@ -914,16 +859,12 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c=
-github.com/nats-io/gnatsd v1.4.1/go.mod h1:nqco77VO78hLCJpIcVfygDP2rPGfsEHkGTUk94uh5DQ=
-github.com/nats-io/go-nats v1.7.0/go.mod h1:+t7RHT5ApZebkrQdnn6AhQJmhJJiKAvJUio1PiiCtj0=
github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg=
github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU=
github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k=
github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w=
-github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4=
github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w=
-github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
@@ -947,7 +888,6 @@ github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0
github.com/onsi/ginkgo v1.12.1 h1:mFwc4LvZ0xpSvDZ3E+k8Yte0hLOMxXUlP+yXtJqkYfQ=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
-github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -968,7 +908,6 @@ github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59P
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-tools v0.0.0-20181011054405-1d69bd0f9c39/go.mod h1:r3f7wjNzSs2extwzU3Y+6pKfobzPh+kKFJ3ofN+3nfs=
github.com/opentracing/opentracing-go v1.1.1-0.20190913142402-a7454ce5950e/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
-github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw=
github.com/openzipkin/zipkin-go v0.2.0/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4=
github.com/openzipkin/zipkin-go v0.2.2 h1:nY8Hti+WKaP0cRsSeQ026wU03QsM762XBeCXBb9NAWI=
@@ -980,7 +919,6 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
-github.com/pelletier/go-toml v1.3.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo=
github.com/pelletier/go-toml v1.6.0/go.mod h1:5N711Q9dKgbdkxHL+MEfF31hpT7l0S0s/t2kKREewys=
github.com/pelletier/go-toml v1.8.0 h1:Keo9qb7iRJs2voHvunFtuuYFsbWeOBh8/P9v/kVMFtw=
github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs=
@@ -1005,7 +943,6 @@ github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndr
github.com/pquerna/cachecontrol v0.0.0-20171018203845-0dec1b30a021/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740=
github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM=
-github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -1013,23 +950,17 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
-github.com/prometheus/common v0.0.0-20181020173914-7e9e6cabbd39/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA=
github.com/prometheus/common v0.9.1 h1:KOMtN28tlbam3/7ZKEYKHhKoJZYYj3gMH4uc62x7X7U=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
-github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
-github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI=
github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
@@ -1050,7 +981,6 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
-github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.5.2 h1:qLvObTrvO/XRCqmkKxUlOBc48bI3efyDuAZe25QiF0w=
@@ -1062,7 +992,6 @@ github.com/ryancurrah/gomodguard v1.0.4/go.mod h1:9T/Cfuxs5StfsocWr4WzDL36HqnX0f
github.com/ryancurrah/gomodguard v1.1.0/go.mod h1:4O8tr7hBODaGE6VIhfJDHcwzh5GUccKSJBU0UMXJFVM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I=
-github.com/satori/go.uuid v0.0.0-20160713180306-0aa62d5ddceb/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
@@ -1073,22 +1002,20 @@ github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAm
github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM=
github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc=
github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4/go.mod h1:qsXQc7+bwAM3Q1u/4XEfrquwF8Lw7D7y5cD8CuHnfIc=
-github.com/shurcooL/githubv4 v0.0.0-20180925043049-51d7b505e2e9/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo=
github.com/shurcooL/githubv4 v0.0.0-20190718010115-4ba037080260/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo=
github.com/shurcooL/githubv4 v0.0.0-20191102174205-af46314aec7b/go.mod h1:hAF0iLZy4td2EX+/8Tw+4nodhlMrwN3HupfaXj3zkGo=
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
-github.com/shurcooL/graphql v0.0.0-20180924043259-e4a3a37e6d42/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg=
github.com/shurcooL/graphql v0.0.0-20181231061246-d48a9a75455f/go.mod h1:AuYgA5Kyo4c7HfUmvRGs/6rGlMMV/6B1bVnB9JxJEEg=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=
-github.com/sirupsen/logrus v1.1.1/go.mod h1:zrgwTnHtNr00buQ1vSptGe8m1f/BbgsPukg8qsT7A+A=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
+github.com/slinkydeveloper/pkg v0.0.0-20200724072217-fc14798189e0 h1:PZV6e80XJ+wqQ9cqrbNRQRwVzTxZkMGXpo6wyzjE0wk=
+github.com/slinkydeveloper/pkg v0.0.0-20200724072217-fc14798189e0/go.mod h1:GtoTEupsOzadgRKT4GgPWukbhAcINwDDGcKibTyd1Fk=
github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc=
github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM=
github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM=
@@ -1113,7 +1040,6 @@ github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1-0.20171106142849-4c012f6dcd95/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -1140,11 +1066,8 @@ github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69
github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
github.com/tdakkota/asciicheck v0.0.0-20200416190851-d7f85be797a2/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
github.com/tdakkota/asciicheck v0.0.0-20200416200610-e657995f937b/go.mod h1:yHp0ai0Z9gUljN3o0xMhYJnH/IcvkdTBOX2fmJ93JEM=
-github.com/tektoncd/pipeline v0.8.0/go.mod h1:IZzJdiX9EqEMuUcgdnElozdYYRh0/ZRC+NKMLj1K3Yw=
-github.com/tektoncd/pipeline v0.10.1/go.mod h1:D2X0exT46zYx95BU7ByM8+erpjoN7thmUBvlKThOszU=
github.com/tektoncd/pipeline v0.11.0/go.mod h1:hlkH32S92+/UODROH0dmxzyuMxfRFp/Nc3e29MewLn8=
github.com/tektoncd/pipeline v0.13.1-0.20200625065359-44f22a067b75/go.mod h1:R5AlT46x/F8n/pFJFjZ1U1q71GWtVXgG7RZkkoRL554=
-github.com/tektoncd/plumbing v0.0.0-20191216083742-847dcf196de9/go.mod h1:QZHgU07PRBTRF6N57w4+ApRu8OgfYLFNqCDlfEZaD9Y=
github.com/tektoncd/plumbing v0.0.0-20200217163359-cd0db6e567d2/go.mod h1:QZHgU07PRBTRF6N57w4+ApRu8OgfYLFNqCDlfEZaD9Y=
github.com/tektoncd/plumbing v0.0.0-20200430135134-e53521e1d887/go.mod h1:cZPJIeTIoP7UPTxQyTQLs7VE1TiXJSNj0te+If4Q+jI=
github.com/tektoncd/plumbing/pipelinerun-logs v0.0.0-20191206114338-712d544c2c21/go.mod h1:S62EUWtqmejjJgUMOGB1CCCHRp6C706laH06BoALkzU=
@@ -1163,7 +1086,6 @@ github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaoz
github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
github.com/tsenart/vegeta v12.7.1-0.20190725001342-b5f4fca92137+incompatible h1:ErZrHhRveAoznVW80gbrxz+qxJNydpA2fcQxTPHkZbU=
github.com/tsenart/vegeta v12.7.1-0.20190725001342-b5f4fca92137+incompatible/go.mod h1:Smz/ZWfhKRcyDDChZkG3CyTHdj87lHzio/HOCkbndXM=
-github.com/ugorji/go v1.1.1/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8=
@@ -1171,7 +1093,6 @@ github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oW
github.com/ultraware/funlen v0.0.2/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
github.com/ultraware/whitespace v0.0.4/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
github.com/urfave/cli v0.0.0-20171014202726-7bc6a0acffa5/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
-github.com/urfave/cli v1.18.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA=
github.com/uudashr/gocognit v1.0.1/go.mod h1:j44Ayx2KW4+oB6SWMv8KsmHzZrOInQav7D3cQMJ5JUM=
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
@@ -1196,7 +1117,6 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xeipuuv/gojsonschema v1.1.0/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos=
-github.com/xiang90/probing v0.0.0-20160813154853-07dd2e8dfe18/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/handysort v0.0.0-20150421192137-fb3537ed64a1/go.mod h1:QcJo0QPSfTONNIgpN5RA8prR7fF8nkF6cTWTcNerRO8=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
@@ -1206,16 +1126,13 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs=
github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA=
github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg=
-go.etcd.io/bbolt v1.3.1-etcd.7/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
-go.etcd.io/etcd v0.0.0-20181031231232-83304cfc808c/go.mod h1:weASp41xM3dk0YHg1s/W8ecdGP5G4teSTMBPpYAaUgA=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0=
-go.opencensus.io v0.17.0/go.mod h1:mp1VrMQxhlqqDpKvH4UcQUa4YwlzNmymAjPrDdfxNpI=
go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
@@ -1234,8 +1151,6 @@ go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
-go.uber.org/automaxprocs v1.3.0 h1:II28aZoGdaglS5vVNnspf28lnZpXScxtIozx1lAjdb0=
-go.uber.org/automaxprocs v1.3.0/go.mod h1:9CWT6lKIep8U41DDaPiH6eFscnTyjfTANNQNx6LrIcA=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4=
@@ -1243,8 +1158,6 @@ go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
-go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
-go.uber.org/zap v1.9.2-0.20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.14.1 h1:nYDKopTbvAPq/NrUVZwT15y2lpROBiLLyoRTbXOYWOo=
@@ -1253,9 +1166,7 @@ go.uber.org/zap v1.15.0 h1:ZZCA22JRF2gQE5FoNmhmrf7jeJJ2uhqDUNRYKm8dvmM=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI=
golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
-golang.org/x/crypto v0.0.0-20181015023909-0c41d7ab0a0e/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -1283,6 +1194,7 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de h1:ikNHVSjEfnvz6sxdSPCaPt572qowuyMDMJLLm3Db3ig=
golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1303,7 +1215,6 @@ golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6 h1:QE6XYQK6naiK1EPAe1g/ILLxN
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
-golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1373,9 +1284,7 @@ golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
-golang.org/x/oauth2 v0.0.0-20180724155351-3d292e4d0cdc/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
-golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1404,7 +1313,6 @@ golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190219203350-90b0e4468f99/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1424,7 +1332,6 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190712062909-fae7ac547cb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190912141932-bc967efca4b8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1439,7 +1346,6 @@ golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20191210023423-ac6580df4449/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1449,9 +1355,7 @@ golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200317113312-5766fd39f98d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/sys v0.0.0-20200327173247-9dae0f8f5775/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1460,6 +1364,7 @@ golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200610111108-226ff32320da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200720211630-cb9d2d5c5666/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200802091954-4b90ce9b60b3 h1:qDJKu1y/1SjhWac4BQZjLljqvqiWUhjmDMnonmVGDAU=
golang.org/x/sys v0.0.0-20200802091954-4b90ce9b60b3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1548,9 +1453,7 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200303214625-2b0b585e22fe/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
-golang.org/x/tools v0.0.0-20200317043434-63da46f3035e/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200324003944-a576cf524670/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
-golang.org/x/tools v0.0.0-20200329025819-fd4102a86c65/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200331202046-9d5940d49312/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200414032229-332987a829c3/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
@@ -1565,9 +1468,10 @@ golang.org/x/tools v0.0.0-20200527183253-8e7acdbce89d/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200601175630-2caf76543d99/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
-golang.org/x/tools v0.0.0-20200701000337-a32c0cb1d5b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200709181711-e327e1019dfe/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200713011307-fd294ab11aed/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200721223218-6123e77877b2/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200725200936-102e7d357031/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6 h1:qKpj8TpV+LEhel7H/fR788J+KvhWZ3o3V6N2fU/iuLU=
@@ -1587,8 +1491,6 @@ gonum.org/v1/netlib v0.0.0-20190313105609-8cb42192e0e0/go.mod h1:wa6Ws7BG/ESfp6d
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e h1:jRyg0XfpwWlhEV8mDfdNGBeSJM2fuyh9Yjrnd8kF2Ts=
gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e/go.mod h1:kS+toOQn6AQKjmKJ7gzohV1XkqsFehRA2FbsbkopSuQ=
google.golang.org/api v0.0.0-20160322025152-9bf6e6e569ff/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
-google.golang.org/api v0.0.0-20181021000519-a2651947f503/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@@ -1613,7 +1515,6 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.29.0 h1:BaiDisFir8O4IJxvAabCGGkQ6yCJegNQqSVoYUNAnbk=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
-google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1623,10 +1524,7 @@ google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/cloud v0.0.0-20151119220103-975617b05ea8/go.mod h1:0H1ncTHf11KCFhTc/+EFRbzSCOZx+VUbRMk55Yv5MYk=
-google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
-google.golang.org/genproto v0.0.0-20181016170114-94acd270e44e/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@@ -1653,8 +1551,6 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200317114155-1f3552e48f24/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
-google.golang.org/genproto v0.0.0-20200326112834-f447254575fd/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
@@ -1666,16 +1562,15 @@ google.golang.org/genproto v0.0.0-20200528110217-3d3490e7e671/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200603110839-e855014d5736/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
-google.golang.org/genproto v0.0.0-20200701001935-0939c5918c31/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200709005830-7a2ca40e9dc3/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200711021454-869866162049/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
+google.golang.org/genproto v0.0.0-20200722002428-88e341933a54/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200726014623-da3ae01ef02d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200731012542-8145dea6a485 h1:wTk5DQB3+1darAz4Ldomo0r5bUOCKX7gilxQ4sb2kno=
google.golang.org/genproto v0.0.0-20200731012542-8145dea6a485/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
-google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1714,7 +1609,6 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQ
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20141024133853-64131543e789/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
@@ -1797,66 +1691,36 @@ k8s.io/component-base v0.17.6/go.mod h1:jgRLWl0B0rOzFNtxQ9E4BphPmDqoMafujdau6AdG
k8s.io/csi-translation-lib v0.17.0/go.mod h1:HEF7MEz7pOLJCnxabi45IPkhSsE/KmxPQksuCrHKWls=
k8s.io/csi-translation-lib v0.17.4/go.mod h1:CsxmjwxEI0tTNMzffIAcgR9lX4wOh6AKHdxQrT7L0oo=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
-k8s.io/gengo v0.0.0-20190306031000-7a1b7fb0289f/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20190822140433-26a664648505/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20191108084044-e500ee069b5c/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200205140755-e0e292d8aa12 h1:pZzawYyz6VRNPVYpqGv61LWCimQv1BihyeqFrp50/G4=
k8s.io/gengo v0.0.0-20200205140755-e0e292d8aa12/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
-k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0 h1:Foj74zO6RbjjP4hBEKjnYtjjAhGg4jNynUdYF6fJrok=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
-k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=
-k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29 h1:NeQXVJ2XFSkRoPzRo8AId01ZER+j8oV4SZADT4iBOXQ=
k8s.io/kube-openapi v0.0.0-20200410145947-bcb3869e6f29/go.mod h1:F+5wygcW0wmRTnM3cOgIqGivxkwSWIWT5YdsDbeAOaU=
k8s.io/kubectl v0.17.2/go.mod h1:y4rfLV0n6aPmvbRCqZQjvOp3ezxsFgpqL+zF5jH/lxk=
-k8s.io/kubernetes v1.11.10/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
-k8s.io/kubernetes v1.14.7/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk=
k8s.io/legacy-cloud-providers v0.17.0/go.mod h1:DdzaepJ3RtRy+e5YhNtrCYwlgyK87j/5+Yfp0L9Syp8=
k8s.io/legacy-cloud-providers v0.17.4/go.mod h1:FikRNoD64ECjkxO36gkDgJeiQWwyZTuBkhu+yxOc1Js=
k8s.io/metrics v0.17.2/go.mod h1:3TkNHET4ROd+NfzNxkjoVfQ0Ob4iZnaHmSEA4vYpwLw=
-k8s.io/test-infra v0.0.0-20181019233642-2e10a0bbe9b3/go.mod h1:2NzXB13Ji0nqpyublHeiPC4FZwU0TknfvyaaNfl/BTA=
-k8s.io/test-infra v0.0.0-20191212060232-70b0b49fe247/go.mod h1:d8SKryJBXAwfCFVL4wieRez47J2NOOAb9d029sWLseQ=
-k8s.io/test-infra v0.0.0-20200407001919-bc7f71ef65b8/go.mod h1:/WpJWcaDvuykB322WXP4kJbX8IpalOzuPxA62GpwkJk=
k8s.io/test-infra v0.0.0-20200514184223-ba32c8aae783/go.mod h1:bW6thaPZfL2hW7ecjx2WYwlP9KQLM47/xIJyttkVk5s=
k8s.io/test-infra v0.0.0-20200617221206-ea73eaeab7ff/go.mod h1:L3+cRvwftUq8IW1TrHji5m3msnc4uck/7LsE/GR/aZk=
-k8s.io/test-infra v0.0.0-20200630233406-1dca6122872e/go.mod h1:L3+cRvwftUq8IW1TrHji5m3msnc4uck/7LsE/GR/aZk=
+k8s.io/test-infra v0.0.0-20200722010006-526277bee528/go.mod h1:4cRZlOy5Ka3Ym/orCmNWL2dsE39pN0xHFT0WFrZe2HQ=
k8s.io/test-infra v0.0.0-20200803112140-d8aa4e063646/go.mod h1:rtUd2cOFwT0aBma1ld6W40F7PuVVw4ELLSFlz9ZEmv8=
-k8s.io/utils v0.0.0-20181019225348-5e321f9a457c/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
-k8s.io/utils v0.0.0-20190506122338-8fab8cb257d5/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
-k8s.io/utils v0.0.0-20190907131718-3d4f5b7dea0b/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200124190032-861946025e34 h1:HjlUD6M0K3P8nRXmr2B9o4F9dUy9TCj/aEpReeyi6+k=
k8s.io/utils v0.0.0-20200124190032-861946025e34/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200603063816-c1c6865ac451 h1:v8ud2Up6QK1lNOKFgiIVrZdMg7MpmSnvtrOieolJKoE=
k8s.io/utils v0.0.0-20200603063816-c1c6865ac451/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
-knative.dev/caching v0.0.0-20190719140829-2032732871ff/go.mod h1:dHXFU6CGlLlbzaWc32g80cR92iuBSpsslDNBWI8C7eg=
knative.dev/caching v0.0.0-20200116200605-67bca2c83dfa/go.mod h1:dHXFU6CGlLlbzaWc32g80cR92iuBSpsslDNBWI8C7eg=
-knative.dev/eventing-contrib v0.6.1-0.20190723221543-5ce18048c08b/go.mod h1:SnXZgSGgMSMLNFTwTnpaOH7hXDzTFtw0J8OmHflNx3g=
knative.dev/eventing-contrib v0.11.2/go.mod h1:SnXZgSGgMSMLNFTwTnpaOH7hXDzTFtw0J8OmHflNx3g=
-knative.dev/pkg v0.0.0-20191101194912-56c2594e4f11/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
-knative.dev/pkg v0.0.0-20191111150521-6d806b998379/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
-knative.dev/pkg v0.0.0-20200207155214-fef852970f43/go.mod h1:pgODObA1dTyhNoFxPZTTjNWfx6F0aKsKzn+vaT9XO/Q=
-knative.dev/pkg v0.0.0-20200428194351-90fc61bae7f7/go.mod h1:o+e8OVEJKIuvXPsGVPIautjXgs05xbos7G+QMRjuUps=
-knative.dev/pkg v0.0.0-20200505191044-3da93ebb24c2/go.mod h1:Q6sL35DdGs8hIQZKdaCXJGgY8f90BmNBKSb8z6d/BTM=
-knative.dev/pkg v0.0.0-20200515002500-16d7b963416f/go.mod h1:tMOHGbxtRz8zYFGEGpV/bpoTEM1o89MwYFC4YJXl3GY=
-knative.dev/pkg v0.0.0-20200528142800-1c6815d7e4c9/go.mod h1:QgNZTxnwpB/oSpNcfnLVlw+WpEwwyKAvJlvR3hgeltA=
-knative.dev/pkg v0.0.0-20200711004937-22502028e31a/go.mod h1:AqAJV6rYi8IGikDjJ/9ZQd9qKdkXVlesVnVjwx62YB8=
-knative.dev/pkg v0.0.0-20200811233205-6b7eccba3b9c h1:6bZkmyYu6towiuNw6WqOUkTrk8d2WhMzI7ku8ZLxgbI=
-knative.dev/pkg v0.0.0-20200811233205-6b7eccba3b9c/go.mod h1:udIbxBS/SJCL4sqnCG8HZArez9HjWmeqJCaVJP/h32I=
-knative.dev/test-infra v0.0.0-20200407185800-1b88cb3b45a5/go.mod h1:xcdUkMJrLlBswIZqL5zCuBFOC22WIPMQoVX1L35i0vQ=
-knative.dev/test-infra v0.0.0-20200505052144-5ea2f705bb55/go.mod h1:WqF1Azka+FxPZ20keR2zCNtiQA1MP9ZB4BH4HuI+SIU=
-knative.dev/test-infra v0.0.0-20200513011557-d03429a76034/go.mod h1:aMif0KXL4g19YCYwsy4Ocjjz5xgPlseYV+B95Oo4JGE=
-knative.dev/test-infra v0.0.0-20200519015156-82551620b0a9/go.mod h1:A5b2OAXTOeHT3hHhVQm3dmtbuWvIDP7qzgtqxA3/2pE=
-knative.dev/test-infra v0.0.0-20200707183444-aed09e56ddc7/go.mod h1:RjYAhXnZqeHw9+B0zsbqSPlae0lCvjekO/nw5ZMpLCs=
-knative.dev/test-infra v0.0.0-20200811030605-72f8c9f3e933 h1:1nfmLI9iQ87ygMeKGLREhH+2KYa6XX/e6enu0PsasHY=
-knative.dev/test-infra v0.0.0-20200811030605-72f8c9f3e933/go.mod h1:Pmg2c7Z7q7BGFUV/GOpU5BlrD3ePJft4MPqx8AYBplc=
+knative.dev/test-infra v0.0.0-20200722142057-3ca910b5a25e/go.mod h1:oHmDsPmq+zcc3b+Z94Kgmrz1JnmZEz36jmKuvL2Lw7o=
knative.dev/test-infra v0.0.0-20200811195106-afcd1747545f h1:VLGSL2XW7xty70B+SWezfmi2KHb3/OaJW7DtFvTr5BU=
knative.dev/test-infra v0.0.0-20200811195106-afcd1747545f/go.mod h1:Pmg2c7Z7q7BGFUV/GOpU5BlrD3ePJft4MPqx8AYBplc=
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw=
@@ -1877,8 +1741,8 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/boskos v0.0.0-20200526191642-45fc818e2d00/go.mod h1:L1ubP7d1CCMSQSjKiZv6dGbh7b4kfoG+dFPj8cfYDnI=
sigs.k8s.io/boskos v0.0.0-20200617235605-f289ba6555ba/go.mod h1:ZO5RV+VxJS9mb6DvZ1yAjywoyq/wQ8b0vDoZxcIA5kE=
+sigs.k8s.io/boskos v0.0.0-20200717180850-7299d535c033/go.mod h1:ZO5RV+VxJS9mb6DvZ1yAjywoyq/wQ8b0vDoZxcIA5kE=
sigs.k8s.io/boskos v0.0.0-20200729174948-794df80db9c9/go.mod h1:ZO5RV+VxJS9mb6DvZ1yAjywoyq/wQ8b0vDoZxcIA5kE=
-sigs.k8s.io/controller-runtime v0.3.0/go.mod h1:Cw6PkEg0Sa7dAYovGT4R0tRkGhHXpYijwNxYhAnAZZk=
sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8=
sigs.k8s.io/controller-runtime v0.5.4/go.mod h1:JZUwSMVbxDupo0lTJSSFP5pimEyxGynROImSsqIOx1A=
sigs.k8s.io/controller-runtime v0.6.1/go.mod h1:XRYBPdbf5XJu9kpS84VJiZ7h/u1hF3gEORz0efEja7A=
@@ -1887,7 +1751,6 @@ sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:w
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
sigs.k8s.io/structured-merge-diff/v2 v2.0.1/go.mod h1:Wb7vfKAodbKgf6tn1Kl0VvGj7mRH6DGaRcixXEJXTsE=
sigs.k8s.io/structured-merge-diff/v3 v3.0.1-0.20200706213357-43c19bbb7fba/go.mod h1:V06abazjHneE37ZdSY/UUwPVgcJMKI/jU5XGUjgIKoc=
-sigs.k8s.io/testing_frameworks v0.1.1/go.mod h1:VVBKrHmJ6Ekkfz284YKhQePcdycOzNH9qL6ht1zEr/U=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
diff --git a/test/conformance/helpers/tracing/zipkin.go b/test/conformance/helpers/tracing/zipkin.go
index 5d30c36bbe9..25adb005875 100644
--- a/test/conformance/helpers/tracing/zipkin.go
+++ b/test/conformance/helpers/tracing/zipkin.go
@@ -27,5 +27,5 @@ import (
// Setup sets up port forwarding to Zipkin.
func Setup(t *testing.T, client *testlib.Client) {
- zipkin.SetupZipkinTracingFromConfigTracingOrFail(t, client.Kube.Kube, resources.SystemNamespace)
+ zipkin.SetupZipkinTracingFromConfigTracingOrFail(t, client.Config, client.Kube.Kube, resources.SystemNamespace)
}
diff --git a/test/e2e/helpers/broker_test_helper.go b/test/e2e/helpers/broker_test_helper.go
index f232378b5cc..d46267fe8b2 100644
--- a/test/e2e/helpers/broker_test_helper.go
+++ b/test/e2e/helpers/broker_test_helper.go
@@ -259,6 +259,7 @@ func TestBrokerWithManyTriggers(t *testing.T, brokerCreator BrokerCreator, shoul
subscriberName := "dumper-" + event.String()
eventTracker, _ := recordevents.StartEventRecordOrFail(client, subscriberName)
eventTrackers[subscriberName] = eventTracker
+
// Create trigger.
triggerName := "trigger-" + event.String()
client.CreateTriggerOrFailV1Beta1(triggerName,
diff --git a/test/lib/client.go b/test/lib/client.go
index f338a951708..b26e043473a 100644
--- a/test/lib/client.go
+++ b/test/lib/client.go
@@ -54,6 +54,8 @@ type Client struct {
podsCreated []string
tracingEnv corev1.EnvVar
+
+ cleanup func()
}
// NewClient instantiates and returns several clientsets required for making request to the
@@ -98,6 +100,31 @@ func NewClient(configPath string, clusterName string, namespace string, t *testi
return client, nil
}
+// Cleanup acts similarly to testing.T, but it's tied to the client lifecycle
+func (c *Client) Cleanup(f func()) {
+ oldCleanup := c.cleanup
+ c.cleanup = func() {
+ if oldCleanup != nil {
+ defer oldCleanup()
+ }
+ f()
+ }
+}
+
+func (c *Client) runCleanup() (err error) {
+ if c.cleanup == nil {
+ return nil
+ }
+ defer func() {
+ if panicVal := recover(); panicVal != nil {
+ err = fmt.Errorf("panic in cleanup function: %+v", panicVal)
+ }
+ }()
+
+ c.cleanup()
+ return nil
+}
+
func getTracingConfig(c *kubernetes.Clientset) (corev1.EnvVar, error) {
cm, err := c.CoreV1().ConfigMaps(resources.SystemNamespace).Get("config-tracing", metav1.GetOptions{})
if err != nil {
diff --git a/test/lib/recordevents/event_info.go b/test/lib/recordevents/event_info.go
index e3fac767959..dd714f07205 100644
--- a/test/lib/recordevents/event_info.go
+++ b/test/lib/recordevents/event_info.go
@@ -31,6 +31,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
+ "k8s.io/client-go/rest"
"knative.dev/pkg/test/monitoring"
"knative.dev/pkg/test/logging"
@@ -100,19 +101,20 @@ type eventGetter struct {
podName string
podNamespace string
podPort int
- kubeClientset kubernetes.Interface
+ kubeClientset *kubernetes.Clientset
+ kubeconfig *rest.Config
logf logging.FormatLogger
- host string
- port int
- forwardPID int
+ host string
+ port int
+ closeCh chan struct{}
}
// Creates a forwarded port to the specified recordevents pod and waits until
// it can successfully talk to the REST API. Times out after timeoutEvRetry
func newEventGetter(podName string, client *testlib.Client, logf logging.FormatLogger) (eventGetterInterface, error) {
egi := &eventGetter{podName: podName, podNamespace: client.Namespace,
- kubeClientset: client.Kube.Kube, podPort: RecordEventsPort, logf: logf}
+ kubeClientset: client.Kube.Kube, kubeconfig: client.Config, podPort: RecordEventsPort, logf: logf}
err := egi.forwardPort()
if err != nil {
return nil, err
@@ -161,14 +163,21 @@ func (eg *eventGetter) forwardPort() error {
return false, nil
}
- pid, err := monitoring.PortForward(eg.logf, pods, localPort, eg.podPort, eg.podNamespace)
+ closeCh, err := monitoring.PortForward(
+ eg.logf,
+ eg.kubeconfig,
+ eg.kubeClientset,
+ &pods.Items[0],
+ localPort,
+ eg.podPort,
+ )
if err != nil {
internalErr = err
return false, nil
}
internalErr = nil
- eg.forwardPID = pid
+ eg.closeCh = closeCh
eg.port = localPort
eg.host = "localhost"
return true, nil
@@ -258,10 +267,9 @@ func (eg *eventGetter) trimThrough(seqno int) error {
// Clean up the getter by tearing down the port forward.
func (eg *eventGetter) cleanup() {
- pid := eg.forwardPID
- eg.forwardPID = 0
- if pid != 0 {
- monitoring.Cleanup(pid)
+ if eg.closeCh != nil {
+ close(eg.closeCh)
+ eg.closeCh = nil
}
}
diff --git a/test/lib/recordevents/event_info_store.go b/test/lib/recordevents/event_info_store.go
index e8bde788934..3b0c46cf44e 100644
--- a/test/lib/recordevents/event_info_store.go
+++ b/test/lib/recordevents/event_info_store.go
@@ -93,7 +93,7 @@ func NewEventInfoStore(client *testlib.Client, podName string) (*EventInfoStore,
ei := newTestableEventInfoStore(egi, -1, -1)
ei.podName = podName
ei.tb = client.T
- client.T.Cleanup(ei.cleanup)
+ client.Cleanup(ei.cleanup)
return ei, nil
}
diff --git a/test/lib/test_runner.go b/test/lib/test_runner.go
index c32f04a05de..553ebeff08e 100644
--- a/test/lib/test_runner.go
+++ b/test/lib/test_runner.go
@@ -28,11 +28,12 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/storage/names"
- "knative.dev/eventing/pkg/utils"
pkgTest "knative.dev/pkg/test"
"knative.dev/pkg/test/helpers"
"knative.dev/pkg/test/prow"
+ "knative.dev/eventing/pkg/utils"
+
// Mysteriously required to support GCP auth (required by k8s libs).
// Apparently just importing it is enough. @_@ side effects @_@.
// https://github.com/kubernetes/client-go/issues/242
@@ -179,6 +180,10 @@ func makeK8sNamespace(baseFuncName string) string {
// TearDown will delete created names using clients.
func TearDown(client *Client) {
+ if err := client.runCleanup(); err != nil {
+ client.T.Logf("Cleanup error: %+v", err)
+ }
+
// Dump the events in the namespace
el, err := client.Kube.Kube.CoreV1().Events(client.Namespace).List(metav1.ListOptions{})
if err != nil {
diff --git a/test/test_images/recordevents/main.go b/test/test_images/recordevents/main.go
index 8dd74116338..3d44c476f96 100644
--- a/test/test_images/recordevents/main.go
+++ b/test/test_images/recordevents/main.go
@@ -50,6 +50,7 @@ func (er *eventRecorder) StartServer(port int) {
http.HandleFunc(recordevents.GetMinMaxPath, er.handleMinMax)
http.HandleFunc(recordevents.GetEntryPath, er.handleGetEntry)
http.HandleFunc(recordevents.TrimThroughPath, er.handleTrim)
+ log.Printf("Starting recordevents REST api server")
go http.ListenAndServe(fmt.Sprintf(":%d", port), nil)
}
@@ -173,6 +174,7 @@ func main() {
})
}
+ log.Printf("Starting event receiver")
err := http.ListenAndServe(":8080", handler)
if err != nil {
panic(err)
diff --git a/third_party/VENDOR-LICENSE/github.com/docker/spdystream/LICENSE b/third_party/VENDOR-LICENSE/github.com/docker/spdystream/LICENSE
new file mode 100644
index 00000000000..9e4bd4dbee9
--- /dev/null
+++ b/third_party/VENDOR-LICENSE/github.com/docker/spdystream/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/third_party/VENDOR-LICENSE/go.uber.org/automaxprocs/LICENSE b/third_party/VENDOR-LICENSE/go.uber.org/automaxprocs/LICENSE
deleted file mode 100644
index 20dcf51d96d..00000000000
--- a/third_party/VENDOR-LICENSE/go.uber.org/automaxprocs/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2017 Uber Technologies, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
deleted file mode 100644
index 0cd3800377d..00000000000
--- a/vendor/github.com/BurntSushi/toml/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-TAGS
-tags
-.*.swp
-tomlcheck/tomlcheck
-toml.test
diff --git a/vendor/github.com/BurntSushi/toml/.travis.yml b/vendor/github.com/BurntSushi/toml/.travis.yml
deleted file mode 100644
index 8b8afc4f0e0..00000000000
--- a/vendor/github.com/BurntSushi/toml/.travis.yml
+++ /dev/null
@@ -1,15 +0,0 @@
-language: go
-go:
- - 1.1
- - 1.2
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - tip
-install:
- - go install ./...
- - go get github.com/BurntSushi/toml-test
-script:
- - export PATH="$PATH:$HOME/gopath/bin"
- - make test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
deleted file mode 100644
index 6efcfd0ce55..00000000000
--- a/vendor/github.com/BurntSushi/toml/COMPATIBLE
+++ /dev/null
@@ -1,3 +0,0 @@
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md)
-
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING
deleted file mode 100644
index 01b5743200b..00000000000
--- a/vendor/github.com/BurntSushi/toml/COPYING
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2013 TOML authors
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
diff --git a/vendor/github.com/BurntSushi/toml/Makefile b/vendor/github.com/BurntSushi/toml/Makefile
deleted file mode 100644
index 3600848d331..00000000000
--- a/vendor/github.com/BurntSushi/toml/Makefile
+++ /dev/null
@@ -1,19 +0,0 @@
-install:
- go install ./...
-
-test: install
- go test -v
- toml-test toml-test-decoder
- toml-test -encoder toml-test-encoder
-
-fmt:
- gofmt -w *.go */*.go
- colcheck *.go */*.go
-
-tags:
- find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS
-
-push:
- git push origin master
- git push github master
-
diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md
deleted file mode 100644
index 7c1b37ecc7a..00000000000
--- a/vendor/github.com/BurntSushi/toml/README.md
+++ /dev/null
@@ -1,218 +0,0 @@
-## TOML parser and encoder for Go with reflection
-
-TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
-reflection interface similar to Go's standard library `json` and `xml`
-packages. This package also supports the `encoding.TextUnmarshaler` and
-`encoding.TextMarshaler` interfaces so that you can define custom data
-representations. (There is an example of this below.)
-
-Spec: https://github.com/toml-lang/toml
-
-Compatible with TOML version
-[v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md)
-
-Documentation: https://godoc.org/github.com/BurntSushi/toml
-
-Installation:
-
-```bash
-go get github.com/BurntSushi/toml
-```
-
-Try the toml validator:
-
-```bash
-go get github.com/BurntSushi/toml/cmd/tomlv
-tomlv some-toml-file.toml
-```
-
-[](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml)
-
-### Testing
-
-This package passes all tests in
-[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
-and the encoder.
-
-### Examples
-
-This package works similarly to how the Go standard library handles `XML`
-and `JSON`. Namely, data is loaded into Go values via reflection.
-
-For the simplest example, consider some TOML file as just a list of keys
-and values:
-
-```toml
-Age = 25
-Cats = [ "Cauchy", "Plato" ]
-Pi = 3.14
-Perfection = [ 6, 28, 496, 8128 ]
-DOB = 1987-07-05T05:45:00Z
-```
-
-Which could be defined in Go as:
-
-```go
-type Config struct {
- Age int
- Cats []string
- Pi float64
- Perfection []int
- DOB time.Time // requires `import time`
-}
-```
-
-And then decoded with:
-
-```go
-var conf Config
-if _, err := toml.Decode(tomlData, &conf); err != nil {
- // handle error
-}
-```
-
-You can also use struct tags if your struct field name doesn't map to a TOML
-key value directly:
-
-```toml
-some_key_NAME = "wat"
-```
-
-```go
-type TOML struct {
- ObscureKey string `toml:"some_key_NAME"`
-}
-```
-
-### Using the `encoding.TextUnmarshaler` interface
-
-Here's an example that automatically parses duration strings into
-`time.Duration` values:
-
-```toml
-[[song]]
-name = "Thunder Road"
-duration = "4m49s"
-
-[[song]]
-name = "Stairway to Heaven"
-duration = "8m03s"
-```
-
-Which can be decoded with:
-
-```go
-type song struct {
- Name string
- Duration duration
-}
-type songs struct {
- Song []song
-}
-var favorites songs
-if _, err := toml.Decode(blob, &favorites); err != nil {
- log.Fatal(err)
-}
-
-for _, s := range favorites.Song {
- fmt.Printf("%s (%s)\n", s.Name, s.Duration)
-}
-```
-
-And you'll also need a `duration` type that satisfies the
-`encoding.TextUnmarshaler` interface:
-
-```go
-type duration struct {
- time.Duration
-}
-
-func (d *duration) UnmarshalText(text []byte) error {
- var err error
- d.Duration, err = time.ParseDuration(string(text))
- return err
-}
-```
-
-### More complex usage
-
-Here's an example of how to load the example from the official spec page:
-
-```toml
-# This is a TOML document. Boom.
-
-title = "TOML Example"
-
-[owner]
-name = "Tom Preston-Werner"
-organization = "GitHub"
-bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
-dob = 1979-05-27T07:32:00Z # First class dates? Why not?
-
-[database]
-server = "192.168.1.1"
-ports = [ 8001, 8001, 8002 ]
-connection_max = 5000
-enabled = true
-
-[servers]
-
- # You can indent as you please. Tabs or spaces. TOML don't care.
- [servers.alpha]
- ip = "10.0.0.1"
- dc = "eqdc10"
-
- [servers.beta]
- ip = "10.0.0.2"
- dc = "eqdc10"
-
-[clients]
-data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
-
-# Line breaks are OK when inside arrays
-hosts = [
- "alpha",
- "omega"
-]
-```
-
-And the corresponding Go types are:
-
-```go
-type tomlConfig struct {
- Title string
- Owner ownerInfo
- DB database `toml:"database"`
- Servers map[string]server
- Clients clients
-}
-
-type ownerInfo struct {
- Name string
- Org string `toml:"organization"`
- Bio string
- DOB time.Time
-}
-
-type database struct {
- Server string
- Ports []int
- ConnMax int `toml:"connection_max"`
- Enabled bool
-}
-
-type server struct {
- IP string
- DC string
-}
-
-type clients struct {
- Data [][]interface{}
- Hosts []string
-}
-```
-
-Note that a case insensitive match will be tried if an exact match can't be
-found.
-
-A working example of the above can be found in `_examples/example.{go,toml}`.
diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go
deleted file mode 100644
index b0fd51d5b6e..00000000000
--- a/vendor/github.com/BurntSushi/toml/decode.go
+++ /dev/null
@@ -1,509 +0,0 @@
-package toml
-
-import (
- "fmt"
- "io"
- "io/ioutil"
- "math"
- "reflect"
- "strings"
- "time"
-)
-
-func e(format string, args ...interface{}) error {
- return fmt.Errorf("toml: "+format, args...)
-}
-
-// Unmarshaler is the interface implemented by objects that can unmarshal a
-// TOML description of themselves.
-type Unmarshaler interface {
- UnmarshalTOML(interface{}) error
-}
-
-// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
-func Unmarshal(p []byte, v interface{}) error {
- _, err := Decode(string(p), v)
- return err
-}
-
-// Primitive is a TOML value that hasn't been decoded into a Go value.
-// When using the various `Decode*` functions, the type `Primitive` may
-// be given to any value, and its decoding will be delayed.
-//
-// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
-//
-// The underlying representation of a `Primitive` value is subject to change.
-// Do not rely on it.
-//
-// N.B. Primitive values are still parsed, so using them will only avoid
-// the overhead of reflection. They can be useful when you don't know the
-// exact type of TOML data until run time.
-type Primitive struct {
- undecoded interface{}
- context Key
-}
-
-// DEPRECATED!
-//
-// Use MetaData.PrimitiveDecode instead.
-func PrimitiveDecode(primValue Primitive, v interface{}) error {
- md := MetaData{decoded: make(map[string]bool)}
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// PrimitiveDecode is just like the other `Decode*` functions, except it
-// decodes a TOML value that has already been parsed. Valid primitive values
-// can *only* be obtained from values filled by the decoder functions,
-// including this method. (i.e., `v` may contain more `Primitive`
-// values.)
-//
-// Meta data for primitive values is included in the meta data returned by
-// the `Decode*` functions with one exception: keys returned by the Undecoded
-// method will only reflect keys that were decoded. Namely, any keys hidden
-// behind a Primitive will be considered undecoded. Executing this method will
-// update the undecoded keys in the meta data. (See the example.)
-func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
- md.context = primValue.context
- defer func() { md.context = nil }()
- return md.unify(primValue.undecoded, rvalue(v))
-}
-
-// Decode will decode the contents of `data` in TOML format into a pointer
-// `v`.
-//
-// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
-// used interchangeably.)
-//
-// TOML arrays of tables correspond to either a slice of structs or a slice
-// of maps.
-//
-// TOML datetimes correspond to Go `time.Time` values.
-//
-// All other TOML types (float, string, int, bool and array) correspond
-// to the obvious Go types.
-//
-// An exception to the above rules is if a type implements the
-// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
-// (floats, strings, integers, booleans and datetimes) will be converted to
-// a byte string and given to the value's UnmarshalText method. See the
-// Unmarshaler example for a demonstration with time duration strings.
-//
-// Key mapping
-//
-// TOML keys can map to either keys in a Go map or field names in a Go
-// struct. The special `toml` struct tag may be used to map TOML keys to
-// struct fields that don't match the key name exactly. (See the example.)
-// A case insensitive match to struct names will be tried if an exact match
-// can't be found.
-//
-// The mapping between TOML values and Go values is loose. That is, there
-// may exist TOML values that cannot be placed into your representation, and
-// there may be parts of your representation that do not correspond to
-// TOML values. This loose mapping can be made stricter by using the IsDefined
-// and/or Undecoded methods on the MetaData returned.
-//
-// This decoder will not handle cyclic types. If a cyclic type is passed,
-// `Decode` will not terminate.
-func Decode(data string, v interface{}) (MetaData, error) {
- rv := reflect.ValueOf(v)
- if rv.Kind() != reflect.Ptr {
- return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
- }
- if rv.IsNil() {
- return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
- }
- p, err := parse(data)
- if err != nil {
- return MetaData{}, err
- }
- md := MetaData{
- p.mapping, p.types, p.ordered,
- make(map[string]bool, len(p.ordered)), nil,
- }
- return md, md.unify(p.mapping, indirect(rv))
-}
-
-// DecodeFile is just like Decode, except it will automatically read the
-// contents of the file at `fpath` and decode it for you.
-func DecodeFile(fpath string, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadFile(fpath)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// DecodeReader is just like Decode, except it will consume all bytes
-// from the reader and decode it for you.
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
- bs, err := ioutil.ReadAll(r)
- if err != nil {
- return MetaData{}, err
- }
- return Decode(string(bs), v)
-}
-
-// unify performs a sort of type unification based on the structure of `rv`,
-// which is the client representation.
-//
-// Any type mismatch produces an error. Finding a type that we don't know
-// how to handle produces an unsupported type error.
-func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
-
- // Special case. Look for a `Primitive` value.
- if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
- // Save the undecoded data and the key context into the primitive
- // value.
- context := make(Key, len(md.context))
- copy(context, md.context)
- rv.Set(reflect.ValueOf(Primitive{
- undecoded: data,
- context: context,
- }))
- return nil
- }
-
- // Special case. Unmarshaler Interface support.
- if rv.CanAddr() {
- if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
- return v.UnmarshalTOML(data)
- }
- }
-
- // Special case. Handle time.Time values specifically.
- // TODO: Remove this code when we decide to drop support for Go 1.1.
- // This isn't necessary in Go 1.2 because time.Time satisfies the encoding
- // interfaces.
- if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
- return md.unifyDatetime(data, rv)
- }
-
- // Special case. Look for a value satisfying the TextUnmarshaler interface.
- if v, ok := rv.Interface().(TextUnmarshaler); ok {
- return md.unifyText(data, v)
- }
- // BUG(burntsushi)
- // The behavior here is incorrect whenever a Go type satisfies the
- // encoding.TextUnmarshaler interface but also corresponds to a TOML
- // hash or array. In particular, the unmarshaler should only be applied
- // to primitive TOML values. But at this point, it will be applied to
- // all kinds of values and produce an incorrect error whenever those values
- // are hashes or arrays (including arrays of tables).
-
- k := rv.Kind()
-
- // laziness
- if k >= reflect.Int && k <= reflect.Uint64 {
- return md.unifyInt(data, rv)
- }
- switch k {
- case reflect.Ptr:
- elem := reflect.New(rv.Type().Elem())
- err := md.unify(data, reflect.Indirect(elem))
- if err != nil {
- return err
- }
- rv.Set(elem)
- return nil
- case reflect.Struct:
- return md.unifyStruct(data, rv)
- case reflect.Map:
- return md.unifyMap(data, rv)
- case reflect.Array:
- return md.unifyArray(data, rv)
- case reflect.Slice:
- return md.unifySlice(data, rv)
- case reflect.String:
- return md.unifyString(data, rv)
- case reflect.Bool:
- return md.unifyBool(data, rv)
- case reflect.Interface:
- // we only support empty interfaces.
- if rv.NumMethod() > 0 {
- return e("unsupported type %s", rv.Type())
- }
- return md.unifyAnything(data, rv)
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- return md.unifyFloat64(data, rv)
- }
- return e("unsupported type %s", rv.Kind())
-}
-
-func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- if mapping == nil {
- return nil
- }
- return e("type mismatch for %s: expected table but found %T",
- rv.Type().String(), mapping)
- }
-
- for key, datum := range tmap {
- var f *field
- fields := cachedTypeFields(rv.Type())
- for i := range fields {
- ff := &fields[i]
- if ff.name == key {
- f = ff
- break
- }
- if f == nil && strings.EqualFold(ff.name, key) {
- f = ff
- }
- }
- if f != nil {
- subv := rv
- for _, i := range f.index {
- subv = indirect(subv.Field(i))
- }
- if isUnifiable(subv) {
- md.decoded[md.context.add(key).String()] = true
- md.context = append(md.context, key)
- if err := md.unify(datum, subv); err != nil {
- return err
- }
- md.context = md.context[0 : len(md.context)-1]
- } else if f.name != "" {
- // Bad user! No soup for you!
- return e("cannot write unexported field %s.%s",
- rv.Type().String(), f.name)
- }
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
- tmap, ok := mapping.(map[string]interface{})
- if !ok {
- if tmap == nil {
- return nil
- }
- return badtype("map", mapping)
- }
- if rv.IsNil() {
- rv.Set(reflect.MakeMap(rv.Type()))
- }
- for k, v := range tmap {
- md.decoded[md.context.add(k).String()] = true
- md.context = append(md.context, k)
-
- rvkey := indirect(reflect.New(rv.Type().Key()))
- rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
- if err := md.unify(v, rvval); err != nil {
- return err
- }
- md.context = md.context[0 : len(md.context)-1]
-
- rvkey.SetString(k)
- rv.SetMapIndex(rvkey, rvval)
- }
- return nil
-}
-
-func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- if !datav.IsValid() {
- return nil
- }
- return badtype("slice", data)
- }
- sliceLen := datav.Len()
- if sliceLen != rv.Len() {
- return e("expected array length %d; got TOML array of length %d",
- rv.Len(), sliceLen)
- }
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
- datav := reflect.ValueOf(data)
- if datav.Kind() != reflect.Slice {
- if !datav.IsValid() {
- return nil
- }
- return badtype("slice", data)
- }
- n := datav.Len()
- if rv.IsNil() || rv.Cap() < n {
- rv.Set(reflect.MakeSlice(rv.Type(), n, n))
- }
- rv.SetLen(n)
- return md.unifySliceArray(datav, rv)
-}
-
-func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
- sliceLen := data.Len()
- for i := 0; i < sliceLen; i++ {
- v := data.Index(i).Interface()
- sliceval := indirect(rv.Index(i))
- if err := md.unify(v, sliceval); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
- if _, ok := data.(time.Time); ok {
- rv.Set(reflect.ValueOf(data))
- return nil
- }
- return badtype("time.Time", data)
-}
-
-func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
- if s, ok := data.(string); ok {
- rv.SetString(s)
- return nil
- }
- return badtype("string", data)
-}
-
-func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
- if num, ok := data.(float64); ok {
- switch rv.Kind() {
- case reflect.Float32:
- fallthrough
- case reflect.Float64:
- rv.SetFloat(num)
- default:
- panic("bug")
- }
- return nil
- }
- return badtype("float", data)
-}
-
-func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
- if num, ok := data.(int64); ok {
- if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
- switch rv.Kind() {
- case reflect.Int, reflect.Int64:
- // No bounds checking necessary.
- case reflect.Int8:
- if num < math.MinInt8 || num > math.MaxInt8 {
- return e("value %d is out of range for int8", num)
- }
- case reflect.Int16:
- if num < math.MinInt16 || num > math.MaxInt16 {
- return e("value %d is out of range for int16", num)
- }
- case reflect.Int32:
- if num < math.MinInt32 || num > math.MaxInt32 {
- return e("value %d is out of range for int32", num)
- }
- }
- rv.SetInt(num)
- } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
- unum := uint64(num)
- switch rv.Kind() {
- case reflect.Uint, reflect.Uint64:
- // No bounds checking necessary.
- case reflect.Uint8:
- if num < 0 || unum > math.MaxUint8 {
- return e("value %d is out of range for uint8", num)
- }
- case reflect.Uint16:
- if num < 0 || unum > math.MaxUint16 {
- return e("value %d is out of range for uint16", num)
- }
- case reflect.Uint32:
- if num < 0 || unum > math.MaxUint32 {
- return e("value %d is out of range for uint32", num)
- }
- }
- rv.SetUint(unum)
- } else {
- panic("unreachable")
- }
- return nil
- }
- return badtype("integer", data)
-}
-
-func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
- if b, ok := data.(bool); ok {
- rv.SetBool(b)
- return nil
- }
- return badtype("boolean", data)
-}
-
-func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
- rv.Set(reflect.ValueOf(data))
- return nil
-}
-
-func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
- var s string
- switch sdata := data.(type) {
- case TextMarshaler:
- text, err := sdata.MarshalText()
- if err != nil {
- return err
- }
- s = string(text)
- case fmt.Stringer:
- s = sdata.String()
- case string:
- s = sdata
- case bool:
- s = fmt.Sprintf("%v", sdata)
- case int64:
- s = fmt.Sprintf("%d", sdata)
- case float64:
- s = fmt.Sprintf("%f", sdata)
- default:
- return badtype("primitive (string-like)", data)
- }
- if err := v.UnmarshalText([]byte(s)); err != nil {
- return err
- }
- return nil
-}
-
-// rvalue returns a reflect.Value of `v`. All pointers are resolved.
-func rvalue(v interface{}) reflect.Value {
- return indirect(reflect.ValueOf(v))
-}
-
-// indirect returns the value pointed to by a pointer.
-// Pointers are followed until the value is not a pointer.
-// New values are allocated for each nil pointer.
-//
-// An exception to this rule is if the value satisfies an interface of
-// interest to us (like encoding.TextUnmarshaler).
-func indirect(v reflect.Value) reflect.Value {
- if v.Kind() != reflect.Ptr {
- if v.CanSet() {
- pv := v.Addr()
- if _, ok := pv.Interface().(TextUnmarshaler); ok {
- return pv
- }
- }
- return v
- }
- if v.IsNil() {
- v.Set(reflect.New(v.Type().Elem()))
- }
- return indirect(reflect.Indirect(v))
-}
-
-func isUnifiable(rv reflect.Value) bool {
- if rv.CanSet() {
- return true
- }
- if _, ok := rv.Interface().(TextUnmarshaler); ok {
- return true
- }
- return false
-}
-
-func badtype(expected string, data interface{}) error {
- return e("cannot load TOML value of type %T into a Go %s", data, expected)
-}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
deleted file mode 100644
index b9914a6798c..00000000000
--- a/vendor/github.com/BurntSushi/toml/decode_meta.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package toml
-
-import "strings"
-
-// MetaData allows access to meta information about TOML data that may not
-// be inferrable via reflection. In particular, whether a key has been defined
-// and the TOML type of a key.
-type MetaData struct {
- mapping map[string]interface{}
- types map[string]tomlType
- keys []Key
- decoded map[string]bool
- context Key // Used only during decoding.
-}
-
-// IsDefined returns true if the key given exists in the TOML data. The key
-// should be specified hierarchially. e.g.,
-//
-// // access the TOML key 'a.b.c'
-// IsDefined("a", "b", "c")
-//
-// IsDefined will return false if an empty key given. Keys are case sensitive.
-func (md *MetaData) IsDefined(key ...string) bool {
- if len(key) == 0 {
- return false
- }
-
- var hash map[string]interface{}
- var ok bool
- var hashOrVal interface{} = md.mapping
- for _, k := range key {
- if hash, ok = hashOrVal.(map[string]interface{}); !ok {
- return false
- }
- if hashOrVal, ok = hash[k]; !ok {
- return false
- }
- }
- return true
-}
-
-// Type returns a string representation of the type of the key specified.
-//
-// Type will return the empty string if given an empty key or a key that
-// does not exist. Keys are case sensitive.
-func (md *MetaData) Type(key ...string) string {
- fullkey := strings.Join(key, ".")
- if typ, ok := md.types[fullkey]; ok {
- return typ.typeString()
- }
- return ""
-}
-
-// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
-// to get values of this type.
-type Key []string
-
-func (k Key) String() string {
- return strings.Join(k, ".")
-}
-
-func (k Key) maybeQuotedAll() string {
- var ss []string
- for i := range k {
- ss = append(ss, k.maybeQuoted(i))
- }
- return strings.Join(ss, ".")
-}
-
-func (k Key) maybeQuoted(i int) string {
- quote := false
- for _, c := range k[i] {
- if !isBareKeyChar(c) {
- quote = true
- break
- }
- }
- if quote {
- return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
- }
- return k[i]
-}
-
-func (k Key) add(piece string) Key {
- newKey := make(Key, len(k)+1)
- copy(newKey, k)
- newKey[len(k)] = piece
- return newKey
-}
-
-// Keys returns a slice of every key in the TOML data, including key groups.
-// Each key is itself a slice, where the first element is the top of the
-// hierarchy and the last is the most specific.
-//
-// The list will have the same order as the keys appeared in the TOML data.
-//
-// All keys returned are non-empty.
-func (md *MetaData) Keys() []Key {
- return md.keys
-}
-
-// Undecoded returns all keys that have not been decoded in the order in which
-// they appear in the original TOML document.
-//
-// This includes keys that haven't been decoded because of a Primitive value.
-// Once the Primitive value is decoded, the keys will be considered decoded.
-//
-// Also note that decoding into an empty interface will result in no decoding,
-// and so no keys will be considered decoded.
-//
-// In this sense, the Undecoded keys correspond to keys in the TOML document
-// that do not have a concrete type in your representation.
-func (md *MetaData) Undecoded() []Key {
- undecoded := make([]Key, 0, len(md.keys))
- for _, key := range md.keys {
- if !md.decoded[key.String()] {
- undecoded = append(undecoded, key)
- }
- }
- return undecoded
-}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
deleted file mode 100644
index b371f396edc..00000000000
--- a/vendor/github.com/BurntSushi/toml/doc.go
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
-Package toml provides facilities for decoding and encoding TOML configuration
-files via reflection. There is also support for delaying decoding with
-the Primitive type, and querying the set of keys in a TOML document with the
-MetaData type.
-
-The specification implemented: https://github.com/toml-lang/toml
-
-The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
-whether a file is a valid TOML document. It can also be used to print the
-type of each key in a TOML document.
-
-Testing
-
-There are two important types of tests used for this package. The first is
-contained inside '*_test.go' files and uses the standard Go unit testing
-framework. These tests are primarily devoted to holistically testing the
-decoder and encoder.
-
-The second type of testing is used to verify the implementation's adherence
-to the TOML specification. These tests have been factored into their own
-project: https://github.com/BurntSushi/toml-test
-
-The reason the tests are in a separate project is so that they can be used by
-any implementation of TOML. Namely, it is language agnostic.
-*/
-package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
deleted file mode 100644
index d905c21a246..00000000000
--- a/vendor/github.com/BurntSushi/toml/encode.go
+++ /dev/null
@@ -1,568 +0,0 @@
-package toml
-
-import (
- "bufio"
- "errors"
- "fmt"
- "io"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type tomlEncodeError struct{ error }
-
-var (
- errArrayMixedElementTypes = errors.New(
- "toml: cannot encode array with mixed element types")
- errArrayNilElement = errors.New(
- "toml: cannot encode array with nil element")
- errNonString = errors.New(
- "toml: cannot encode a map with non-string key type")
- errAnonNonStruct = errors.New(
- "toml: cannot encode an anonymous field that is not a struct")
- errArrayNoTable = errors.New(
- "toml: TOML array element cannot contain a table")
- errNoKey = errors.New(
- "toml: top-level values must be Go maps or structs")
- errAnything = errors.New("") // used in testing
-)
-
-var quotedReplacer = strings.NewReplacer(
- "\t", "\\t",
- "\n", "\\n",
- "\r", "\\r",
- "\"", "\\\"",
- "\\", "\\\\",
-)
-
-// Encoder controls the encoding of Go values to a TOML document to some
-// io.Writer.
-//
-// The indentation level can be controlled with the Indent field.
-type Encoder struct {
- // A single indentation level. By default it is two spaces.
- Indent string
-
- // hasWritten is whether we have written any output to w yet.
- hasWritten bool
- w *bufio.Writer
-}
-
-// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
-// given. By default, a single indentation level is 2 spaces.
-func NewEncoder(w io.Writer) *Encoder {
- return &Encoder{
- w: bufio.NewWriter(w),
- Indent: " ",
- }
-}
-
-// Encode writes a TOML representation of the Go value to the underlying
-// io.Writer. If the value given cannot be encoded to a valid TOML document,
-// then an error is returned.
-//
-// The mapping between Go values and TOML values should be precisely the same
-// as for the Decode* functions. Similarly, the TextMarshaler interface is
-// supported by encoding the resulting bytes as strings. (If you want to write
-// arbitrary binary data then you will need to use something like base64 since
-// TOML does not have any binary types.)
-//
-// When encoding TOML hashes (i.e., Go maps or structs), keys without any
-// sub-hashes are encoded first.
-//
-// If a Go map is encoded, then its keys are sorted alphabetically for
-// deterministic output. More control over this behavior may be provided if
-// there is demand for it.
-//
-// Encoding Go values without a corresponding TOML representation---like map
-// types with non-string keys---will cause an error to be returned. Similarly
-// for mixed arrays/slices, arrays/slices with nil elements, embedded
-// non-struct types and nested slices containing maps or structs.
-// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
-// and so is []map[string][]string.)
-func (enc *Encoder) Encode(v interface{}) error {
- rv := eindirect(reflect.ValueOf(v))
- if err := enc.safeEncode(Key([]string{}), rv); err != nil {
- return err
- }
- return enc.w.Flush()
-}
-
-func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
- defer func() {
- if r := recover(); r != nil {
- if terr, ok := r.(tomlEncodeError); ok {
- err = terr.error
- return
- }
- panic(r)
- }
- }()
- enc.encode(key, rv)
- return nil
-}
-
-func (enc *Encoder) encode(key Key, rv reflect.Value) {
- // Special case. Time needs to be in ISO8601 format.
- // Special case. If we can marshal the type to text, then we used that.
- // Basically, this prevents the encoder for handling these types as
- // generic structs (or whatever the underlying type of a TextMarshaler is).
- switch rv.Interface().(type) {
- case time.Time, TextMarshaler:
- enc.keyEqElement(key, rv)
- return
- }
-
- k := rv.Kind()
- switch k {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64,
- reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
- enc.keyEqElement(key, rv)
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
- enc.eArrayOfTables(key, rv)
- } else {
- enc.keyEqElement(key, rv)
- }
- case reflect.Interface:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Map:
- if rv.IsNil() {
- return
- }
- enc.eTable(key, rv)
- case reflect.Ptr:
- if rv.IsNil() {
- return
- }
- enc.encode(key, rv.Elem())
- case reflect.Struct:
- enc.eTable(key, rv)
- default:
- panic(e("unsupported type for key '%s': %s", key, k))
- }
-}
-
-// eElement encodes any value that can be an array element (primitives and
-// arrays).
-func (enc *Encoder) eElement(rv reflect.Value) {
- switch v := rv.Interface().(type) {
- case time.Time:
- // Special case time.Time as a primitive. Has to come before
- // TextMarshaler below because time.Time implements
- // encoding.TextMarshaler, but we need to always use UTC.
- enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
- return
- case TextMarshaler:
- // Special case. Use text marshaler if it's available for this value.
- if s, err := v.MarshalText(); err != nil {
- encPanic(err)
- } else {
- enc.writeQuoted(string(s))
- }
- return
- }
- switch rv.Kind() {
- case reflect.Bool:
- enc.wf(strconv.FormatBool(rv.Bool()))
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64:
- enc.wf(strconv.FormatInt(rv.Int(), 10))
- case reflect.Uint, reflect.Uint8, reflect.Uint16,
- reflect.Uint32, reflect.Uint64:
- enc.wf(strconv.FormatUint(rv.Uint(), 10))
- case reflect.Float32:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
- case reflect.Float64:
- enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
- case reflect.Array, reflect.Slice:
- enc.eArrayOrSliceElement(rv)
- case reflect.Interface:
- enc.eElement(rv.Elem())
- case reflect.String:
- enc.writeQuoted(rv.String())
- default:
- panic(e("unexpected primitive type: %s", rv.Kind()))
- }
-}
-
-// By the TOML spec, all floats must have a decimal with at least one
-// number on either side.
-func floatAddDecimal(fstr string) string {
- if !strings.Contains(fstr, ".") {
- return fstr + ".0"
- }
- return fstr
-}
-
-func (enc *Encoder) writeQuoted(s string) {
- enc.wf("\"%s\"", quotedReplacer.Replace(s))
-}
-
-func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
- length := rv.Len()
- enc.wf("[")
- for i := 0; i < length; i++ {
- elem := rv.Index(i)
- enc.eElement(elem)
- if i != length-1 {
- enc.wf(", ")
- }
- }
- enc.wf("]")
-}
-
-func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- for i := 0; i < rv.Len(); i++ {
- trv := rv.Index(i)
- if isNil(trv) {
- continue
- }
- panicIfInvalidKey(key)
- enc.newline()
- enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
- enc.newline()
- enc.eMapOrStruct(key, trv)
- }
-}
-
-func (enc *Encoder) eTable(key Key, rv reflect.Value) {
- panicIfInvalidKey(key)
- if len(key) == 1 {
- // Output an extra newline between top-level tables.
- // (The newline isn't written if nothing else has been written though.)
- enc.newline()
- }
- if len(key) > 0 {
- enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
- enc.newline()
- }
- enc.eMapOrStruct(key, rv)
-}
-
-func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
- switch rv := eindirect(rv); rv.Kind() {
- case reflect.Map:
- enc.eMap(key, rv)
- case reflect.Struct:
- enc.eStruct(key, rv)
- default:
- panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
- }
-}
-
-func (enc *Encoder) eMap(key Key, rv reflect.Value) {
- rt := rv.Type()
- if rt.Key().Kind() != reflect.String {
- encPanic(errNonString)
- }
-
- // Sort keys so that we have deterministic output. And write keys directly
- // underneath this key first, before writing sub-structs or sub-maps.
- var mapKeysDirect, mapKeysSub []string
- for _, mapKey := range rv.MapKeys() {
- k := mapKey.String()
- if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
- mapKeysSub = append(mapKeysSub, k)
- } else {
- mapKeysDirect = append(mapKeysDirect, k)
- }
- }
-
- var writeMapKeys = func(mapKeys []string) {
- sort.Strings(mapKeys)
- for _, mapKey := range mapKeys {
- mrv := rv.MapIndex(reflect.ValueOf(mapKey))
- if isNil(mrv) {
- // Don't write anything for nil fields.
- continue
- }
- enc.encode(key.add(mapKey), mrv)
- }
- }
- writeMapKeys(mapKeysDirect)
- writeMapKeys(mapKeysSub)
-}
-
-func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
- // Write keys for fields directly under this key first, because if we write
- // a field that creates a new table, then all keys under it will be in that
- // table (not the one we're writing here).
- rt := rv.Type()
- var fieldsDirect, fieldsSub [][]int
- var addFields func(rt reflect.Type, rv reflect.Value, start []int)
- addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
- for i := 0; i < rt.NumField(); i++ {
- f := rt.Field(i)
- // skip unexported fields
- if f.PkgPath != "" && !f.Anonymous {
- continue
- }
- frv := rv.Field(i)
- if f.Anonymous {
- t := f.Type
- switch t.Kind() {
- case reflect.Struct:
- // Treat anonymous struct fields with
- // tag names as though they are not
- // anonymous, like encoding/json does.
- if getOptions(f.Tag).name == "" {
- addFields(t, frv, f.Index)
- continue
- }
- case reflect.Ptr:
- if t.Elem().Kind() == reflect.Struct &&
- getOptions(f.Tag).name == "" {
- if !frv.IsNil() {
- addFields(t.Elem(), frv.Elem(), f.Index)
- }
- continue
- }
- // Fall through to the normal field encoding logic below
- // for non-struct anonymous fields.
- }
- }
-
- if typeIsHash(tomlTypeOfGo(frv)) {
- fieldsSub = append(fieldsSub, append(start, f.Index...))
- } else {
- fieldsDirect = append(fieldsDirect, append(start, f.Index...))
- }
- }
- }
- addFields(rt, rv, nil)
-
- var writeFields = func(fields [][]int) {
- for _, fieldIndex := range fields {
- sft := rt.FieldByIndex(fieldIndex)
- sf := rv.FieldByIndex(fieldIndex)
- if isNil(sf) {
- // Don't write anything for nil fields.
- continue
- }
-
- opts := getOptions(sft.Tag)
- if opts.skip {
- continue
- }
- keyName := sft.Name
- if opts.name != "" {
- keyName = opts.name
- }
- if opts.omitempty && isEmpty(sf) {
- continue
- }
- if opts.omitzero && isZero(sf) {
- continue
- }
-
- enc.encode(key.add(keyName), sf)
- }
- }
- writeFields(fieldsDirect)
- writeFields(fieldsSub)
-}
-
-// tomlTypeName returns the TOML type name of the Go value's type. It is
-// used to determine whether the types of array elements are mixed (which is
-// forbidden). If the Go value is nil, then it is illegal for it to be an array
-// element, and valueIsNil is returned as true.
-
-// Returns the TOML type of a Go value. The type may be `nil`, which means
-// no concrete TOML type could be found.
-func tomlTypeOfGo(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() {
- return nil
- }
- switch rv.Kind() {
- case reflect.Bool:
- return tomlBool
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
- reflect.Int64,
- reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
- reflect.Uint64:
- return tomlInteger
- case reflect.Float32, reflect.Float64:
- return tomlFloat
- case reflect.Array, reflect.Slice:
- if typeEqual(tomlHash, tomlArrayType(rv)) {
- return tomlArrayHash
- }
- return tomlArray
- case reflect.Ptr, reflect.Interface:
- return tomlTypeOfGo(rv.Elem())
- case reflect.String:
- return tomlString
- case reflect.Map:
- return tomlHash
- case reflect.Struct:
- switch rv.Interface().(type) {
- case time.Time:
- return tomlDatetime
- case TextMarshaler:
- return tomlString
- default:
- return tomlHash
- }
- default:
- panic("unexpected reflect.Kind: " + rv.Kind().String())
- }
-}
-
-// tomlArrayType returns the element type of a TOML array. The type returned
-// may be nil if it cannot be determined (e.g., a nil slice or a zero length
-// slize). This function may also panic if it finds a type that cannot be
-// expressed in TOML (such as nil elements, heterogeneous arrays or directly
-// nested arrays of tables).
-func tomlArrayType(rv reflect.Value) tomlType {
- if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
- return nil
- }
- firstType := tomlTypeOfGo(rv.Index(0))
- if firstType == nil {
- encPanic(errArrayNilElement)
- }
-
- rvlen := rv.Len()
- for i := 1; i < rvlen; i++ {
- elem := rv.Index(i)
- switch elemType := tomlTypeOfGo(elem); {
- case elemType == nil:
- encPanic(errArrayNilElement)
- case !typeEqual(firstType, elemType):
- encPanic(errArrayMixedElementTypes)
- }
- }
- // If we have a nested array, then we must make sure that the nested
- // array contains ONLY primitives.
- // This checks arbitrarily nested arrays.
- if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
- nest := tomlArrayType(eindirect(rv.Index(0)))
- if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
- encPanic(errArrayNoTable)
- }
- }
- return firstType
-}
-
-type tagOptions struct {
- skip bool // "-"
- name string
- omitempty bool
- omitzero bool
-}
-
-func getOptions(tag reflect.StructTag) tagOptions {
- t := tag.Get("toml")
- if t == "-" {
- return tagOptions{skip: true}
- }
- var opts tagOptions
- parts := strings.Split(t, ",")
- opts.name = parts[0]
- for _, s := range parts[1:] {
- switch s {
- case "omitempty":
- opts.omitempty = true
- case "omitzero":
- opts.omitzero = true
- }
- }
- return opts
-}
-
-func isZero(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return rv.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- return rv.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return rv.Float() == 0.0
- }
- return false
-}
-
-func isEmpty(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
- return rv.Len() == 0
- case reflect.Bool:
- return !rv.Bool()
- }
- return false
-}
-
-func (enc *Encoder) newline() {
- if enc.hasWritten {
- enc.wf("\n")
- }
-}
-
-func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
- if len(key) == 0 {
- encPanic(errNoKey)
- }
- panicIfInvalidKey(key)
- enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
- enc.eElement(val)
- enc.newline()
-}
-
-func (enc *Encoder) wf(format string, v ...interface{}) {
- if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
- encPanic(err)
- }
- enc.hasWritten = true
-}
-
-func (enc *Encoder) indentStr(key Key) string {
- return strings.Repeat(enc.Indent, len(key)-1)
-}
-
-func encPanic(err error) {
- panic(tomlEncodeError{err})
-}
-
-func eindirect(v reflect.Value) reflect.Value {
- switch v.Kind() {
- case reflect.Ptr, reflect.Interface:
- return eindirect(v.Elem())
- default:
- return v
- }
-}
-
-func isNil(rv reflect.Value) bool {
- switch rv.Kind() {
- case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return rv.IsNil()
- default:
- return false
- }
-}
-
-func panicIfInvalidKey(key Key) {
- for _, k := range key {
- if len(k) == 0 {
- encPanic(e("Key '%s' is not a valid table name. Key names "+
- "cannot be empty.", key.maybeQuotedAll()))
- }
- }
-}
-
-func isValidKeyName(s string) bool {
- return len(s) != 0
-}
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types.go b/vendor/github.com/BurntSushi/toml/encoding_types.go
deleted file mode 100644
index d36e1dd6002..00000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build go1.2
-
-package toml
-
-// In order to support Go 1.1, we define our own TextMarshaler and
-// TextUnmarshaler types. For Go 1.2+, we just alias them with the
-// standard library interfaces.
-
-import (
- "encoding"
-)
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler encoding.TextMarshaler
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler encoding.TextUnmarshaler
diff --git a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
deleted file mode 100644
index e8d503d0469..00000000000
--- a/vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// +build !go1.2
-
-package toml
-
-// These interfaces were introduced in Go 1.2, so we add them manually when
-// compiling for Go 1.1.
-
-// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
-// so that Go 1.1 can be supported.
-type TextMarshaler interface {
- MarshalText() (text []byte, err error)
-}
-
-// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
-// here so that Go 1.1 can be supported.
-type TextUnmarshaler interface {
- UnmarshalText(text []byte) error
-}
diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go
deleted file mode 100644
index e0a742a8870..00000000000
--- a/vendor/github.com/BurntSushi/toml/lex.go
+++ /dev/null
@@ -1,953 +0,0 @@
-package toml
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-type itemType int
-
-const (
- itemError itemType = iota
- itemNIL // used in the parser to indicate no type
- itemEOF
- itemText
- itemString
- itemRawString
- itemMultilineString
- itemRawMultilineString
- itemBool
- itemInteger
- itemFloat
- itemDatetime
- itemArray // the start of an array
- itemArrayEnd
- itemTableStart
- itemTableEnd
- itemArrayTableStart
- itemArrayTableEnd
- itemKeyStart
- itemCommentStart
- itemInlineTableStart
- itemInlineTableEnd
-)
-
-const (
- eof = 0
- comma = ','
- tableStart = '['
- tableEnd = ']'
- arrayTableStart = '['
- arrayTableEnd = ']'
- tableSep = '.'
- keySep = '='
- arrayStart = '['
- arrayEnd = ']'
- commentStart = '#'
- stringStart = '"'
- stringEnd = '"'
- rawStringStart = '\''
- rawStringEnd = '\''
- inlineTableStart = '{'
- inlineTableEnd = '}'
-)
-
-type stateFn func(lx *lexer) stateFn
-
-type lexer struct {
- input string
- start int
- pos int
- line int
- state stateFn
- items chan item
-
- // Allow for backing up up to three runes.
- // This is necessary because TOML contains 3-rune tokens (""" and ''').
- prevWidths [3]int
- nprev int // how many of prevWidths are in use
- // If we emit an eof, we can still back up, but it is not OK to call
- // next again.
- atEOF bool
-
- // A stack of state functions used to maintain context.
- // The idea is to reuse parts of the state machine in various places.
- // For example, values can appear at the top level or within arbitrarily
- // nested arrays. The last state on the stack is used after a value has
- // been lexed. Similarly for comments.
- stack []stateFn
-}
-
-type item struct {
- typ itemType
- val string
- line int
-}
-
-func (lx *lexer) nextItem() item {
- for {
- select {
- case item := <-lx.items:
- return item
- default:
- lx.state = lx.state(lx)
- }
- }
-}
-
-func lex(input string) *lexer {
- lx := &lexer{
- input: input,
- state: lexTop,
- line: 1,
- items: make(chan item, 10),
- stack: make([]stateFn, 0, 10),
- }
- return lx
-}
-
-func (lx *lexer) push(state stateFn) {
- lx.stack = append(lx.stack, state)
-}
-
-func (lx *lexer) pop() stateFn {
- if len(lx.stack) == 0 {
- return lx.errorf("BUG in lexer: no states to pop")
- }
- last := lx.stack[len(lx.stack)-1]
- lx.stack = lx.stack[0 : len(lx.stack)-1]
- return last
-}
-
-func (lx *lexer) current() string {
- return lx.input[lx.start:lx.pos]
-}
-
-func (lx *lexer) emit(typ itemType) {
- lx.items <- item{typ, lx.current(), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) emitTrim(typ itemType) {
- lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
- lx.start = lx.pos
-}
-
-func (lx *lexer) next() (r rune) {
- if lx.atEOF {
- panic("next called after EOF")
- }
- if lx.pos >= len(lx.input) {
- lx.atEOF = true
- return eof
- }
-
- if lx.input[lx.pos] == '\n' {
- lx.line++
- }
- lx.prevWidths[2] = lx.prevWidths[1]
- lx.prevWidths[1] = lx.prevWidths[0]
- if lx.nprev < 3 {
- lx.nprev++
- }
- r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
- lx.prevWidths[0] = w
- lx.pos += w
- return r
-}
-
-// ignore skips over the pending input before this point.
-func (lx *lexer) ignore() {
- lx.start = lx.pos
-}
-
-// backup steps back one rune. Can be called only twice between calls to next.
-func (lx *lexer) backup() {
- if lx.atEOF {
- lx.atEOF = false
- return
- }
- if lx.nprev < 1 {
- panic("backed up too far")
- }
- w := lx.prevWidths[0]
- lx.prevWidths[0] = lx.prevWidths[1]
- lx.prevWidths[1] = lx.prevWidths[2]
- lx.nprev--
- lx.pos -= w
- if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
- lx.line--
- }
-}
-
-// accept consumes the next rune if it's equal to `valid`.
-func (lx *lexer) accept(valid rune) bool {
- if lx.next() == valid {
- return true
- }
- lx.backup()
- return false
-}
-
-// peek returns but does not consume the next rune in the input.
-func (lx *lexer) peek() rune {
- r := lx.next()
- lx.backup()
- return r
-}
-
-// skip ignores all input that matches the given predicate.
-func (lx *lexer) skip(pred func(rune) bool) {
- for {
- r := lx.next()
- if pred(r) {
- continue
- }
- lx.backup()
- lx.ignore()
- return
- }
-}
-
-// errorf stops all lexing by emitting an error and returning `nil`.
-// Note that any value that is a character is escaped if it's a special
-// character (newlines, tabs, etc.).
-func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
- lx.items <- item{
- itemError,
- fmt.Sprintf(format, values...),
- lx.line,
- }
- return nil
-}
-
-// lexTop consumes elements at the top level of TOML data.
-func lexTop(lx *lexer) stateFn {
- r := lx.next()
- if isWhitespace(r) || isNL(r) {
- return lexSkip(lx, lexTop)
- }
- switch r {
- case commentStart:
- lx.push(lexTop)
- return lexCommentStart
- case tableStart:
- return lexTableStart
- case eof:
- if lx.pos > lx.start {
- return lx.errorf("unexpected EOF")
- }
- lx.emit(itemEOF)
- return nil
- }
-
- // At this point, the only valid item can be a key, so we back up
- // and let the key lexer do the rest.
- lx.backup()
- lx.push(lexTopEnd)
- return lexKeyStart
-}
-
-// lexTopEnd is entered whenever a top-level item has been consumed. (A value
-// or a table.) It must see only whitespace, and will turn back to lexTop
-// upon a newline. If it sees EOF, it will quit the lexer successfully.
-func lexTopEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == commentStart:
- // a comment will read to a newline for us.
- lx.push(lexTop)
- return lexCommentStart
- case isWhitespace(r):
- return lexTopEnd
- case isNL(r):
- lx.ignore()
- return lexTop
- case r == eof:
- lx.emit(itemEOF)
- return nil
- }
- return lx.errorf("expected a top-level item to end with a newline, "+
- "comment, or EOF, but got %q instead", r)
-}
-
-// lexTable lexes the beginning of a table. Namely, it makes sure that
-// it starts with a character other than '.' and ']'.
-// It assumes that '[' has already been consumed.
-// It also handles the case that this is an item in an array of tables.
-// e.g., '[[name]]'.
-func lexTableStart(lx *lexer) stateFn {
- if lx.peek() == arrayTableStart {
- lx.next()
- lx.emit(itemArrayTableStart)
- lx.push(lexArrayTableEnd)
- } else {
- lx.emit(itemTableStart)
- lx.push(lexTableEnd)
- }
- return lexTableNameStart
-}
-
-func lexTableEnd(lx *lexer) stateFn {
- lx.emit(itemTableEnd)
- return lexTopEnd
-}
-
-func lexArrayTableEnd(lx *lexer) stateFn {
- if r := lx.next(); r != arrayTableEnd {
- return lx.errorf("expected end of table array name delimiter %q, "+
- "but got %q instead", arrayTableEnd, r)
- }
- lx.emit(itemArrayTableEnd)
- return lexTopEnd
-}
-
-func lexTableNameStart(lx *lexer) stateFn {
- lx.skip(isWhitespace)
- switch r := lx.peek(); {
- case r == tableEnd || r == eof:
- return lx.errorf("unexpected end of table name " +
- "(table names cannot be empty)")
- case r == tableSep:
- return lx.errorf("unexpected table separator " +
- "(table names cannot be empty)")
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.push(lexTableNameEnd)
- return lexValue // reuse string lexing
- default:
- return lexBareTableName
- }
-}
-
-// lexBareTableName lexes the name of a table. It assumes that at least one
-// valid character for the table has already been read.
-func lexBareTableName(lx *lexer) stateFn {
- r := lx.next()
- if isBareKeyChar(r) {
- return lexBareTableName
- }
- lx.backup()
- lx.emit(itemText)
- return lexTableNameEnd
-}
-
-// lexTableNameEnd reads the end of a piece of a table name, optionally
-// consuming whitespace.
-func lexTableNameEnd(lx *lexer) stateFn {
- lx.skip(isWhitespace)
- switch r := lx.next(); {
- case isWhitespace(r):
- return lexTableNameEnd
- case r == tableSep:
- lx.ignore()
- return lexTableNameStart
- case r == tableEnd:
- return lx.pop()
- default:
- return lx.errorf("expected '.' or ']' to end table name, "+
- "but got %q instead", r)
- }
-}
-
-// lexKeyStart consumes a key name up until the first non-whitespace character.
-// lexKeyStart will ignore whitespace.
-func lexKeyStart(lx *lexer) stateFn {
- r := lx.peek()
- switch {
- case r == keySep:
- return lx.errorf("unexpected key separator %q", keySep)
- case isWhitespace(r) || isNL(r):
- lx.next()
- return lexSkip(lx, lexKeyStart)
- case r == stringStart || r == rawStringStart:
- lx.ignore()
- lx.emit(itemKeyStart)
- lx.push(lexKeyEnd)
- return lexValue // reuse string lexing
- default:
- lx.ignore()
- lx.emit(itemKeyStart)
- return lexBareKey
- }
-}
-
-// lexBareKey consumes the text of a bare key. Assumes that the first character
-// (which is not whitespace) has not yet been consumed.
-func lexBareKey(lx *lexer) stateFn {
- switch r := lx.next(); {
- case isBareKeyChar(r):
- return lexBareKey
- case isWhitespace(r):
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- case r == keySep:
- lx.backup()
- lx.emit(itemText)
- return lexKeyEnd
- default:
- return lx.errorf("bare keys cannot contain %q", r)
- }
-}
-
-// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
-// separator).
-func lexKeyEnd(lx *lexer) stateFn {
- switch r := lx.next(); {
- case r == keySep:
- return lexSkip(lx, lexValue)
- case isWhitespace(r):
- return lexSkip(lx, lexKeyEnd)
- default:
- return lx.errorf("expected key separator %q, but got %q instead",
- keySep, r)
- }
-}
-
-// lexValue starts the consumption of a value anywhere a value is expected.
-// lexValue will ignore whitespace.
-// After a value is lexed, the last state on the next is popped and returned.
-func lexValue(lx *lexer) stateFn {
- // We allow whitespace to precede a value, but NOT newlines.
- // In array syntax, the array states are responsible for ignoring newlines.
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexValue)
- case isDigit(r):
- lx.backup() // avoid an extra state and use the same as above
- return lexNumberOrDateStart
- }
- switch r {
- case arrayStart:
- lx.ignore()
- lx.emit(itemArray)
- return lexArrayValue
- case inlineTableStart:
- lx.ignore()
- lx.emit(itemInlineTableStart)
- return lexInlineTableValue
- case stringStart:
- if lx.accept(stringStart) {
- if lx.accept(stringStart) {
- lx.ignore() // Ignore """
- return lexMultilineString
- }
- lx.backup()
- }
- lx.ignore() // ignore the '"'
- return lexString
- case rawStringStart:
- if lx.accept(rawStringStart) {
- if lx.accept(rawStringStart) {
- lx.ignore() // Ignore """
- return lexMultilineRawString
- }
- lx.backup()
- }
- lx.ignore() // ignore the "'"
- return lexRawString
- case '+', '-':
- return lexNumberStart
- case '.': // special error case, be kind to users
- return lx.errorf("floats must start with a digit, not '.'")
- }
- if unicode.IsLetter(r) {
- // Be permissive here; lexBool will give a nice error if the
- // user wrote something like
- // x = foo
- // (i.e. not 'true' or 'false' but is something else word-like.)
- lx.backup()
- return lexBool
- }
- return lx.errorf("expected value but found %q instead", r)
-}
-
-// lexArrayValue consumes one value in an array. It assumes that '[' or ','
-// have already been consumed. All whitespace and newlines are ignored.
-func lexArrayValue(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValue)
- case r == commentStart:
- lx.push(lexArrayValue)
- return lexCommentStart
- case r == comma:
- return lx.errorf("unexpected comma")
- case r == arrayEnd:
- // NOTE(caleb): The spec isn't clear about whether you can have
- // a trailing comma or not, so we'll allow it.
- return lexArrayEnd
- }
-
- lx.backup()
- lx.push(lexArrayValueEnd)
- return lexValue
-}
-
-// lexArrayValueEnd consumes everything between the end of an array value and
-// the next value (or the end of the array): it ignores whitespace and newlines
-// and expects either a ',' or a ']'.
-func lexArrayValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r) || isNL(r):
- return lexSkip(lx, lexArrayValueEnd)
- case r == commentStart:
- lx.push(lexArrayValueEnd)
- return lexCommentStart
- case r == comma:
- lx.ignore()
- return lexArrayValue // move on to the next value
- case r == arrayEnd:
- return lexArrayEnd
- }
- return lx.errorf(
- "expected a comma or array terminator %q, but got %q instead",
- arrayEnd, r,
- )
-}
-
-// lexArrayEnd finishes the lexing of an array.
-// It assumes that a ']' has just been consumed.
-func lexArrayEnd(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemArrayEnd)
- return lx.pop()
-}
-
-// lexInlineTableValue consumes one key/value pair in an inline table.
-// It assumes that '{' or ',' have already been consumed. Whitespace is ignored.
-func lexInlineTableValue(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexInlineTableValue)
- case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
- lx.push(lexInlineTableValue)
- return lexCommentStart
- case r == comma:
- return lx.errorf("unexpected comma")
- case r == inlineTableEnd:
- return lexInlineTableEnd
- }
- lx.backup()
- lx.push(lexInlineTableValueEnd)
- return lexKeyStart
-}
-
-// lexInlineTableValueEnd consumes everything between the end of an inline table
-// key/value pair and the next pair (or the end of the table):
-// it ignores whitespace and expects either a ',' or a '}'.
-func lexInlineTableValueEnd(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case isWhitespace(r):
- return lexSkip(lx, lexInlineTableValueEnd)
- case isNL(r):
- return lx.errorf("newlines not allowed within inline tables")
- case r == commentStart:
- lx.push(lexInlineTableValueEnd)
- return lexCommentStart
- case r == comma:
- lx.ignore()
- return lexInlineTableValue
- case r == inlineTableEnd:
- return lexInlineTableEnd
- }
- return lx.errorf("expected a comma or an inline table terminator %q, "+
- "but got %q instead", inlineTableEnd, r)
-}
-
-// lexInlineTableEnd finishes the lexing of an inline table.
-// It assumes that a '}' has just been consumed.
-func lexInlineTableEnd(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemInlineTableEnd)
- return lx.pop()
-}
-
-// lexString consumes the inner contents of a string. It assumes that the
-// beginning '"' has already been consumed and ignored.
-func lexString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == eof:
- return lx.errorf("unexpected EOF")
- case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == '\\':
- lx.push(lexString)
- return lexStringEscape
- case r == stringEnd:
- lx.backup()
- lx.emit(itemString)
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- return lexString
-}
-
-// lexMultilineString consumes the inner contents of a string. It assumes that
-// the beginning '"""' has already been consumed and ignored.
-func lexMultilineString(lx *lexer) stateFn {
- switch lx.next() {
- case eof:
- return lx.errorf("unexpected EOF")
- case '\\':
- return lexMultilineStringEscape
- case stringEnd:
- if lx.accept(stringEnd) {
- if lx.accept(stringEnd) {
- lx.backup()
- lx.backup()
- lx.backup()
- lx.emit(itemMultilineString)
- lx.next()
- lx.next()
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- lx.backup()
- }
- }
- return lexMultilineString
-}
-
-// lexRawString consumes a raw string. Nothing can be escaped in such a string.
-// It assumes that the beginning "'" has already been consumed and ignored.
-func lexRawString(lx *lexer) stateFn {
- r := lx.next()
- switch {
- case r == eof:
- return lx.errorf("unexpected EOF")
- case isNL(r):
- return lx.errorf("strings cannot contain newlines")
- case r == rawStringEnd:
- lx.backup()
- lx.emit(itemRawString)
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- return lexRawString
-}
-
-// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
-// a string. It assumes that the beginning "'''" has already been consumed and
-// ignored.
-func lexMultilineRawString(lx *lexer) stateFn {
- switch lx.next() {
- case eof:
- return lx.errorf("unexpected EOF")
- case rawStringEnd:
- if lx.accept(rawStringEnd) {
- if lx.accept(rawStringEnd) {
- lx.backup()
- lx.backup()
- lx.backup()
- lx.emit(itemRawMultilineString)
- lx.next()
- lx.next()
- lx.next()
- lx.ignore()
- return lx.pop()
- }
- lx.backup()
- }
- }
- return lexMultilineRawString
-}
-
-// lexMultilineStringEscape consumes an escaped character. It assumes that the
-// preceding '\\' has already been consumed.
-func lexMultilineStringEscape(lx *lexer) stateFn {
- // Handle the special case first:
- if isNL(lx.next()) {
- return lexMultilineString
- }
- lx.backup()
- lx.push(lexMultilineString)
- return lexStringEscape(lx)
-}
-
-func lexStringEscape(lx *lexer) stateFn {
- r := lx.next()
- switch r {
- case 'b':
- fallthrough
- case 't':
- fallthrough
- case 'n':
- fallthrough
- case 'f':
- fallthrough
- case 'r':
- fallthrough
- case '"':
- fallthrough
- case '\\':
- return lx.pop()
- case 'u':
- return lexShortUnicodeEscape
- case 'U':
- return lexLongUnicodeEscape
- }
- return lx.errorf("invalid escape character %q; only the following "+
- "escape characters are allowed: "+
- `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r)
-}
-
-func lexShortUnicodeEscape(lx *lexer) stateFn {
- var r rune
- for i := 0; i < 4; i++ {
- r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected four hexadecimal digits after '\u', `+
- "but got %q instead", lx.current())
- }
- }
- return lx.pop()
-}
-
-func lexLongUnicodeEscape(lx *lexer) stateFn {
- var r rune
- for i := 0; i < 8; i++ {
- r = lx.next()
- if !isHexadecimal(r) {
- return lx.errorf(`expected eight hexadecimal digits after '\U', `+
- "but got %q instead", lx.current())
- }
- }
- return lx.pop()
-}
-
-// lexNumberOrDateStart consumes either an integer, a float, or datetime.
-func lexNumberOrDateStart(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
- switch r {
- case '_':
- return lexNumber
- case 'e', 'E':
- return lexFloat
- case '.':
- return lx.errorf("floats must start with a digit, not '.'")
- }
- return lx.errorf("expected a digit but got %q", r)
-}
-
-// lexNumberOrDate consumes either an integer, float or datetime.
-func lexNumberOrDate(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumberOrDate
- }
- switch r {
- case '-':
- return lexDatetime
- case '_':
- return lexNumber
- case '.', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexDatetime consumes a Datetime, to a first approximation.
-// The parser validates that it matches one of the accepted formats.
-func lexDatetime(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexDatetime
- }
- switch r {
- case '-', 'T', ':', '.', 'Z', '+':
- return lexDatetime
- }
-
- lx.backup()
- lx.emit(itemDatetime)
- return lx.pop()
-}
-
-// lexNumberStart consumes either an integer or a float. It assumes that a sign
-// has already been read, but that *no* digits have been consumed.
-// lexNumberStart will move to the appropriate integer or float states.
-func lexNumberStart(lx *lexer) stateFn {
- // We MUST see a digit. Even floats have to start with a digit.
- r := lx.next()
- if !isDigit(r) {
- if r == '.' {
- return lx.errorf("floats must start with a digit, not '.'")
- }
- return lx.errorf("expected a digit but got %q", r)
- }
- return lexNumber
-}
-
-// lexNumber consumes an integer or a float after seeing the first digit.
-func lexNumber(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexNumber
- }
- switch r {
- case '_':
- return lexNumber
- case '.', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemInteger)
- return lx.pop()
-}
-
-// lexFloat consumes the elements of a float. It allows any sequence of
-// float-like characters, so floats emitted by the lexer are only a first
-// approximation and must be validated by the parser.
-func lexFloat(lx *lexer) stateFn {
- r := lx.next()
- if isDigit(r) {
- return lexFloat
- }
- switch r {
- case '_', '.', '-', '+', 'e', 'E':
- return lexFloat
- }
-
- lx.backup()
- lx.emit(itemFloat)
- return lx.pop()
-}
-
-// lexBool consumes a bool string: 'true' or 'false.
-func lexBool(lx *lexer) stateFn {
- var rs []rune
- for {
- r := lx.next()
- if !unicode.IsLetter(r) {
- lx.backup()
- break
- }
- rs = append(rs, r)
- }
- s := string(rs)
- switch s {
- case "true", "false":
- lx.emit(itemBool)
- return lx.pop()
- }
- return lx.errorf("expected value but found %q instead", s)
-}
-
-// lexCommentStart begins the lexing of a comment. It will emit
-// itemCommentStart and consume no characters, passing control to lexComment.
-func lexCommentStart(lx *lexer) stateFn {
- lx.ignore()
- lx.emit(itemCommentStart)
- return lexComment
-}
-
-// lexComment lexes an entire comment. It assumes that '#' has been consumed.
-// It will consume *up to* the first newline character, and pass control
-// back to the last state on the stack.
-func lexComment(lx *lexer) stateFn {
- r := lx.peek()
- if isNL(r) || r == eof {
- lx.emit(itemText)
- return lx.pop()
- }
- lx.next()
- return lexComment
-}
-
-// lexSkip ignores all slurped input and moves on to the next state.
-func lexSkip(lx *lexer, nextState stateFn) stateFn {
- return func(lx *lexer) stateFn {
- lx.ignore()
- return nextState
- }
-}
-
-// isWhitespace returns true if `r` is a whitespace character according
-// to the spec.
-func isWhitespace(r rune) bool {
- return r == '\t' || r == ' '
-}
-
-func isNL(r rune) bool {
- return r == '\n' || r == '\r'
-}
-
-func isDigit(r rune) bool {
- return r >= '0' && r <= '9'
-}
-
-func isHexadecimal(r rune) bool {
- return (r >= '0' && r <= '9') ||
- (r >= 'a' && r <= 'f') ||
- (r >= 'A' && r <= 'F')
-}
-
-func isBareKeyChar(r rune) bool {
- return (r >= 'A' && r <= 'Z') ||
- (r >= 'a' && r <= 'z') ||
- (r >= '0' && r <= '9') ||
- r == '_' ||
- r == '-'
-}
-
-func (itype itemType) String() string {
- switch itype {
- case itemError:
- return "Error"
- case itemNIL:
- return "NIL"
- case itemEOF:
- return "EOF"
- case itemText:
- return "Text"
- case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
- return "String"
- case itemBool:
- return "Bool"
- case itemInteger:
- return "Integer"
- case itemFloat:
- return "Float"
- case itemDatetime:
- return "DateTime"
- case itemTableStart:
- return "TableStart"
- case itemTableEnd:
- return "TableEnd"
- case itemKeyStart:
- return "KeyStart"
- case itemArray:
- return "Array"
- case itemArrayEnd:
- return "ArrayEnd"
- case itemCommentStart:
- return "CommentStart"
- }
- panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
-}
-
-func (item item) String() string {
- return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
-}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
deleted file mode 100644
index 50869ef9266..00000000000
--- a/vendor/github.com/BurntSushi/toml/parse.go
+++ /dev/null
@@ -1,592 +0,0 @@
-package toml
-
-import (
- "fmt"
- "strconv"
- "strings"
- "time"
- "unicode"
- "unicode/utf8"
-)
-
-type parser struct {
- mapping map[string]interface{}
- types map[string]tomlType
- lx *lexer
-
- // A list of keys in the order that they appear in the TOML data.
- ordered []Key
-
- // the full key for the current hash in scope
- context Key
-
- // the base key name for everything except hashes
- currentKey string
-
- // rough approximation of line number
- approxLine int
-
- // A map of 'key.group.names' to whether they were created implicitly.
- implicits map[string]bool
-}
-
-type parseError string
-
-func (pe parseError) Error() string {
- return string(pe)
-}
-
-func parse(data string) (p *parser, err error) {
- defer func() {
- if r := recover(); r != nil {
- var ok bool
- if err, ok = r.(parseError); ok {
- return
- }
- panic(r)
- }
- }()
-
- p = &parser{
- mapping: make(map[string]interface{}),
- types: make(map[string]tomlType),
- lx: lex(data),
- ordered: make([]Key, 0),
- implicits: make(map[string]bool),
- }
- for {
- item := p.next()
- if item.typ == itemEOF {
- break
- }
- p.topLevel(item)
- }
-
- return p, nil
-}
-
-func (p *parser) panicf(format string, v ...interface{}) {
- msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
- p.approxLine, p.current(), fmt.Sprintf(format, v...))
- panic(parseError(msg))
-}
-
-func (p *parser) next() item {
- it := p.lx.nextItem()
- if it.typ == itemError {
- p.panicf("%s", it.val)
- }
- return it
-}
-
-func (p *parser) bug(format string, v ...interface{}) {
- panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
-}
-
-func (p *parser) expect(typ itemType) item {
- it := p.next()
- p.assertEqual(typ, it.typ)
- return it
-}
-
-func (p *parser) assertEqual(expected, got itemType) {
- if expected != got {
- p.bug("Expected '%s' but got '%s'.", expected, got)
- }
-}
-
-func (p *parser) topLevel(item item) {
- switch item.typ {
- case itemCommentStart:
- p.approxLine = item.line
- p.expect(itemText)
- case itemTableStart:
- kg := p.next()
- p.approxLine = kg.line
-
- var key Key
- for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
- }
- p.assertEqual(itemTableEnd, kg.typ)
-
- p.establishContext(key, false)
- p.setType("", tomlHash)
- p.ordered = append(p.ordered, key)
- case itemArrayTableStart:
- kg := p.next()
- p.approxLine = kg.line
-
- var key Key
- for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
- key = append(key, p.keyString(kg))
- }
- p.assertEqual(itemArrayTableEnd, kg.typ)
-
- p.establishContext(key, true)
- p.setType("", tomlArrayHash)
- p.ordered = append(p.ordered, key)
- case itemKeyStart:
- kname := p.next()
- p.approxLine = kname.line
- p.currentKey = p.keyString(kname)
-
- val, typ := p.value(p.next())
- p.setValue(p.currentKey, val)
- p.setType(p.currentKey, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- p.currentKey = ""
- default:
- p.bug("Unexpected type at top level: %s", item.typ)
- }
-}
-
-// Gets a string for a key (or part of a key in a table name).
-func (p *parser) keyString(it item) string {
- switch it.typ {
- case itemText:
- return it.val
- case itemString, itemMultilineString,
- itemRawString, itemRawMultilineString:
- s, _ := p.value(it)
- return s.(string)
- default:
- p.bug("Unexpected key type: %s", it.typ)
- panic("unreachable")
- }
-}
-
-// value translates an expected value from the lexer into a Go value wrapped
-// as an empty interface.
-func (p *parser) value(it item) (interface{}, tomlType) {
- switch it.typ {
- case itemString:
- return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
- case itemMultilineString:
- trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
- return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
- case itemRawString:
- return it.val, p.typeOfPrimitive(it)
- case itemRawMultilineString:
- return stripFirstNewline(it.val), p.typeOfPrimitive(it)
- case itemBool:
- switch it.val {
- case "true":
- return true, p.typeOfPrimitive(it)
- case "false":
- return false, p.typeOfPrimitive(it)
- }
- p.bug("Expected boolean value, but got '%s'.", it.val)
- case itemInteger:
- if !numUnderscoresOK(it.val) {
- p.panicf("Invalid integer %q: underscores must be surrounded by digits",
- it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseInt(val, 10, 64)
- if err != nil {
- // Distinguish integer values. Normally, it'd be a bug if the lexer
- // provides an invalid integer, but it's possible that the number is
- // out of range of valid values (which the lexer cannot determine).
- // So mark the former as a bug but the latter as a legitimate user
- // error.
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Integer '%s' is out of the range of 64-bit "+
- "signed integers.", it.val)
- } else {
- p.bug("Expected integer value, but got '%s'.", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemFloat:
- parts := strings.FieldsFunc(it.val, func(r rune) bool {
- switch r {
- case '.', 'e', 'E':
- return true
- }
- return false
- })
- for _, part := range parts {
- if !numUnderscoresOK(part) {
- p.panicf("Invalid float %q: underscores must be "+
- "surrounded by digits", it.val)
- }
- }
- if !numPeriodsOK(it.val) {
- // As a special case, numbers like '123.' or '1.e2',
- // which are valid as far as Go/strconv are concerned,
- // must be rejected because TOML says that a fractional
- // part consists of '.' followed by 1+ digits.
- p.panicf("Invalid float %q: '.' must be followed "+
- "by one or more digits", it.val)
- }
- val := strings.Replace(it.val, "_", "", -1)
- num, err := strconv.ParseFloat(val, 64)
- if err != nil {
- if e, ok := err.(*strconv.NumError); ok &&
- e.Err == strconv.ErrRange {
-
- p.panicf("Float '%s' is out of the range of 64-bit "+
- "IEEE-754 floating-point numbers.", it.val)
- } else {
- p.panicf("Invalid float value: %q", it.val)
- }
- }
- return num, p.typeOfPrimitive(it)
- case itemDatetime:
- var t time.Time
- var ok bool
- var err error
- for _, format := range []string{
- "2006-01-02T15:04:05Z07:00",
- "2006-01-02T15:04:05",
- "2006-01-02",
- } {
- t, err = time.ParseInLocation(format, it.val, time.Local)
- if err == nil {
- ok = true
- break
- }
- }
- if !ok {
- p.panicf("Invalid TOML Datetime: %q.", it.val)
- }
- return t, p.typeOfPrimitive(it)
- case itemArray:
- array := make([]interface{}, 0)
- types := make([]tomlType, 0)
-
- for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
-
- val, typ := p.value(it)
- array = append(array, val)
- types = append(types, typ)
- }
- return array, p.typeOfArray(types)
- case itemInlineTableStart:
- var (
- hash = make(map[string]interface{})
- outerContext = p.context
- outerKey = p.currentKey
- )
-
- p.context = append(p.context, p.currentKey)
- p.currentKey = ""
- for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() {
- if it.typ != itemKeyStart {
- p.bug("Expected key start but instead found %q, around line %d",
- it.val, p.approxLine)
- }
- if it.typ == itemCommentStart {
- p.expect(itemText)
- continue
- }
-
- // retrieve key
- k := p.next()
- p.approxLine = k.line
- kname := p.keyString(k)
-
- // retrieve value
- p.currentKey = kname
- val, typ := p.value(p.next())
- // make sure we keep metadata up to date
- p.setType(kname, typ)
- p.ordered = append(p.ordered, p.context.add(p.currentKey))
- hash[kname] = val
- }
- p.context = outerContext
- p.currentKey = outerKey
- return hash, tomlHash
- }
- p.bug("Unexpected value type: %s", it.typ)
- panic("unreachable")
-}
-
-// numUnderscoresOK checks whether each underscore in s is surrounded by
-// characters that are not underscores.
-func numUnderscoresOK(s string) bool {
- accept := false
- for _, r := range s {
- if r == '_' {
- if !accept {
- return false
- }
- accept = false
- continue
- }
- accept = true
- }
- return accept
-}
-
-// numPeriodsOK checks whether every period in s is followed by a digit.
-func numPeriodsOK(s string) bool {
- period := false
- for _, r := range s {
- if period && !isDigit(r) {
- return false
- }
- period = r == '.'
- }
- return !period
-}
-
-// establishContext sets the current context of the parser,
-// where the context is either a hash or an array of hashes. Which one is
-// set depends on the value of the `array` parameter.
-//
-// Establishing the context also makes sure that the key isn't a duplicate, and
-// will create implicit hashes automatically.
-func (p *parser) establishContext(key Key, array bool) {
- var ok bool
-
- // Always start at the top level and drill down for our context.
- hashContext := p.mapping
- keyContext := make(Key, 0)
-
- // We only need implicit hashes for key[0:-1]
- for _, k := range key[0 : len(key)-1] {
- _, ok = hashContext[k]
- keyContext = append(keyContext, k)
-
- // No key? Make an implicit hash and move on.
- if !ok {
- p.addImplicit(keyContext)
- hashContext[k] = make(map[string]interface{})
- }
-
- // If the hash context is actually an array of tables, then set
- // the hash context to the last element in that array.
- //
- // Otherwise, it better be a table, since this MUST be a key group (by
- // virtue of it not being the last element in a key).
- switch t := hashContext[k].(type) {
- case []map[string]interface{}:
- hashContext = t[len(t)-1]
- case map[string]interface{}:
- hashContext = t
- default:
- p.panicf("Key '%s' was already created as a hash.", keyContext)
- }
- }
-
- p.context = keyContext
- if array {
- // If this is the first element for this array, then allocate a new
- // list of tables for it.
- k := key[len(key)-1]
- if _, ok := hashContext[k]; !ok {
- hashContext[k] = make([]map[string]interface{}, 0, 5)
- }
-
- // Add a new table. But make sure the key hasn't already been used
- // for something else.
- if hash, ok := hashContext[k].([]map[string]interface{}); ok {
- hashContext[k] = append(hash, make(map[string]interface{}))
- } else {
- p.panicf("Key '%s' was already created and cannot be used as "+
- "an array.", keyContext)
- }
- } else {
- p.setValue(key[len(key)-1], make(map[string]interface{}))
- }
- p.context = append(p.context, key[len(key)-1])
-}
-
-// setValue sets the given key to the given value in the current context.
-// It will make sure that the key hasn't already been defined, account for
-// implicit key groups.
-func (p *parser) setValue(key string, value interface{}) {
- var tmpHash interface{}
- var ok bool
-
- hash := p.mapping
- keyContext := make(Key, 0)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- if tmpHash, ok = hash[k]; !ok {
- p.bug("Context for key '%s' has not been established.", keyContext)
- }
- switch t := tmpHash.(type) {
- case []map[string]interface{}:
- // The context is a table of hashes. Pick the most recent table
- // defined as the current hash.
- hash = t[len(t)-1]
- case map[string]interface{}:
- hash = t
- default:
- p.bug("Expected hash to have type 'map[string]interface{}', but "+
- "it has '%T' instead.", tmpHash)
- }
- }
- keyContext = append(keyContext, key)
-
- if _, ok := hash[key]; ok {
- // Typically, if the given key has already been set, then we have
- // to raise an error since duplicate keys are disallowed. However,
- // it's possible that a key was previously defined implicitly. In this
- // case, it is allowed to be redefined concretely. (See the
- // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
- //
- // But we have to make sure to stop marking it as an implicit. (So that
- // another redefinition provokes an error.)
- //
- // Note that since it has already been defined (as a hash), we don't
- // want to overwrite it. So our business is done.
- if p.isImplicit(keyContext) {
- p.removeImplicit(keyContext)
- return
- }
-
- // Otherwise, we have a concrete key trying to override a previous
- // key, which is *always* wrong.
- p.panicf("Key '%s' has already been defined.", keyContext)
- }
- hash[key] = value
-}
-
-// setType sets the type of a particular value at a given key.
-// It should be called immediately AFTER setValue.
-//
-// Note that if `key` is empty, then the type given will be applied to the
-// current context (which is either a table or an array of tables).
-func (p *parser) setType(key string, typ tomlType) {
- keyContext := make(Key, 0, len(p.context)+1)
- for _, k := range p.context {
- keyContext = append(keyContext, k)
- }
- if len(key) > 0 { // allow type setting for hashes
- keyContext = append(keyContext, key)
- }
- p.types[keyContext.String()] = typ
-}
-
-// addImplicit sets the given Key as having been created implicitly.
-func (p *parser) addImplicit(key Key) {
- p.implicits[key.String()] = true
-}
-
-// removeImplicit stops tagging the given key as having been implicitly
-// created.
-func (p *parser) removeImplicit(key Key) {
- p.implicits[key.String()] = false
-}
-
-// isImplicit returns true if the key group pointed to by the key was created
-// implicitly.
-func (p *parser) isImplicit(key Key) bool {
- return p.implicits[key.String()]
-}
-
-// current returns the full key name of the current context.
-func (p *parser) current() string {
- if len(p.currentKey) == 0 {
- return p.context.String()
- }
- if len(p.context) == 0 {
- return p.currentKey
- }
- return fmt.Sprintf("%s.%s", p.context, p.currentKey)
-}
-
-func stripFirstNewline(s string) string {
- if len(s) == 0 || s[0] != '\n' {
- return s
- }
- return s[1:]
-}
-
-func stripEscapedWhitespace(s string) string {
- esc := strings.Split(s, "\\\n")
- if len(esc) > 1 {
- for i := 1; i < len(esc); i++ {
- esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
- }
- }
- return strings.Join(esc, "")
-}
-
-func (p *parser) replaceEscapes(str string) string {
- var replaced []rune
- s := []byte(str)
- r := 0
- for r < len(s) {
- if s[r] != '\\' {
- c, size := utf8.DecodeRune(s[r:])
- r += size
- replaced = append(replaced, c)
- continue
- }
- r += 1
- if r >= len(s) {
- p.bug("Escape sequence at end of string.")
- return ""
- }
- switch s[r] {
- default:
- p.bug("Expected valid escape code after \\, but got %q.", s[r])
- return ""
- case 'b':
- replaced = append(replaced, rune(0x0008))
- r += 1
- case 't':
- replaced = append(replaced, rune(0x0009))
- r += 1
- case 'n':
- replaced = append(replaced, rune(0x000A))
- r += 1
- case 'f':
- replaced = append(replaced, rune(0x000C))
- r += 1
- case 'r':
- replaced = append(replaced, rune(0x000D))
- r += 1
- case '"':
- replaced = append(replaced, rune(0x0022))
- r += 1
- case '\\':
- replaced = append(replaced, rune(0x005C))
- r += 1
- case 'u':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+5). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
- replaced = append(replaced, escaped)
- r += 5
- case 'U':
- // At this point, we know we have a Unicode escape of the form
- // `uXXXX` at [r, r+9). (Because the lexer guarantees this
- // for us.)
- escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
- replaced = append(replaced, escaped)
- r += 9
- }
- }
- return string(replaced)
-}
-
-func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
- s := string(bs)
- hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
- if err != nil {
- p.bug("Could not parse '%s' as a hexadecimal number, but the "+
- "lexer claims it's OK: %s", s, err)
- }
- if !utf8.ValidRune(rune(hex)) {
- p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
- }
- return rune(hex)
-}
-
-func isStringType(ty itemType) bool {
- return ty == itemString || ty == itemMultilineString ||
- ty == itemRawString || ty == itemRawMultilineString
-}
diff --git a/vendor/github.com/BurntSushi/toml/session.vim b/vendor/github.com/BurntSushi/toml/session.vim
deleted file mode 100644
index 562164be060..00000000000
--- a/vendor/github.com/BurntSushi/toml/session.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufWritePost *.go silent!make tags > /dev/null 2>&1
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
deleted file mode 100644
index c73f8afc1a6..00000000000
--- a/vendor/github.com/BurntSushi/toml/type_check.go
+++ /dev/null
@@ -1,91 +0,0 @@
-package toml
-
-// tomlType represents any Go type that corresponds to a TOML type.
-// While the first draft of the TOML spec has a simplistic type system that
-// probably doesn't need this level of sophistication, we seem to be militating
-// toward adding real composite types.
-type tomlType interface {
- typeString() string
-}
-
-// typeEqual accepts any two types and returns true if they are equal.
-func typeEqual(t1, t2 tomlType) bool {
- if t1 == nil || t2 == nil {
- return false
- }
- return t1.typeString() == t2.typeString()
-}
-
-func typeIsHash(t tomlType) bool {
- return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
-}
-
-type tomlBaseType string
-
-func (btype tomlBaseType) typeString() string {
- return string(btype)
-}
-
-func (btype tomlBaseType) String() string {
- return btype.typeString()
-}
-
-var (
- tomlInteger tomlBaseType = "Integer"
- tomlFloat tomlBaseType = "Float"
- tomlDatetime tomlBaseType = "Datetime"
- tomlString tomlBaseType = "String"
- tomlBool tomlBaseType = "Bool"
- tomlArray tomlBaseType = "Array"
- tomlHash tomlBaseType = "Hash"
- tomlArrayHash tomlBaseType = "ArrayHash"
-)
-
-// typeOfPrimitive returns a tomlType of any primitive value in TOML.
-// Primitive values are: Integer, Float, Datetime, String and Bool.
-//
-// Passing a lexer item other than the following will cause a BUG message
-// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
-func (p *parser) typeOfPrimitive(lexItem item) tomlType {
- switch lexItem.typ {
- case itemInteger:
- return tomlInteger
- case itemFloat:
- return tomlFloat
- case itemDatetime:
- return tomlDatetime
- case itemString:
- return tomlString
- case itemMultilineString:
- return tomlString
- case itemRawString:
- return tomlString
- case itemRawMultilineString:
- return tomlString
- case itemBool:
- return tomlBool
- }
- p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
- panic("unreachable")
-}
-
-// typeOfArray returns a tomlType for an array given a list of types of its
-// values.
-//
-// In the current spec, if an array is homogeneous, then its type is always
-// "Array". If the array is not homogeneous, an error is generated.
-func (p *parser) typeOfArray(types []tomlType) tomlType {
- // Empty arrays are cool.
- if len(types) == 0 {
- return tomlArray
- }
-
- theType := types[0]
- for _, t := range types[1:] {
- if !typeEqual(theType, t) {
- p.panicf("Array contains values of type '%s' and '%s', but "+
- "arrays must be homogeneous.", theType, t)
- }
- }
- return tomlArray
-}
diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go
deleted file mode 100644
index 608997c22f6..00000000000
--- a/vendor/github.com/BurntSushi/toml/type_fields.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package toml
-
-// Struct field handling is adapted from code in encoding/json:
-//
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the Go distribution.
-
-import (
- "reflect"
- "sort"
- "sync"
-)
-
-// A field represents a single field found in a struct.
-type field struct {
- name string // the name of the field (`toml` tag included)
- tag bool // whether field has a `toml` tag
- index []int // represents the depth of an anonymous field
- typ reflect.Type // the type of the field
-}
-
-// byName sorts field by name, breaking ties with depth,
-// then breaking ties with "name came from toml tag", then
-// breaking ties with index sequence.
-type byName []field
-
-func (x byName) Len() int { return len(x) }
-
-func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byName) Less(i, j int) bool {
- if x[i].name != x[j].name {
- return x[i].name < x[j].name
- }
- if len(x[i].index) != len(x[j].index) {
- return len(x[i].index) < len(x[j].index)
- }
- if x[i].tag != x[j].tag {
- return x[i].tag
- }
- return byIndex(x).Less(i, j)
-}
-
-// byIndex sorts field by index sequence.
-type byIndex []field
-
-func (x byIndex) Len() int { return len(x) }
-
-func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-func (x byIndex) Less(i, j int) bool {
- for k, xik := range x[i].index {
- if k >= len(x[j].index) {
- return false
- }
- if xik != x[j].index[k] {
- return xik < x[j].index[k]
- }
- }
- return len(x[i].index) < len(x[j].index)
-}
-
-// typeFields returns a list of fields that TOML should recognize for the given
-// type. The algorithm is breadth-first search over the set of structs to
-// include - the top struct and then any reachable anonymous structs.
-func typeFields(t reflect.Type) []field {
- // Anonymous fields to explore at the current level and the next.
- current := []field{}
- next := []field{{typ: t}}
-
- // Count of queued names for current level and the next.
- count := map[reflect.Type]int{}
- nextCount := map[reflect.Type]int{}
-
- // Types already visited at an earlier level.
- visited := map[reflect.Type]bool{}
-
- // Fields found.
- var fields []field
-
- for len(next) > 0 {
- current, next = next, current[:0]
- count, nextCount = nextCount, map[reflect.Type]int{}
-
- for _, f := range current {
- if visited[f.typ] {
- continue
- }
- visited[f.typ] = true
-
- // Scan f.typ for fields to include.
- for i := 0; i < f.typ.NumField(); i++ {
- sf := f.typ.Field(i)
- if sf.PkgPath != "" && !sf.Anonymous { // unexported
- continue
- }
- opts := getOptions(sf.Tag)
- if opts.skip {
- continue
- }
- index := make([]int, len(f.index)+1)
- copy(index, f.index)
- index[len(f.index)] = i
-
- ft := sf.Type
- if ft.Name() == "" && ft.Kind() == reflect.Ptr {
- // Follow pointer.
- ft = ft.Elem()
- }
-
- // Record found field and index sequence.
- if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
- tagged := opts.name != ""
- name := opts.name
- if name == "" {
- name = sf.Name
- }
- fields = append(fields, field{name, tagged, index, ft})
- if count[f.typ] > 1 {
- // If there were multiple instances, add a second,
- // so that the annihilation code will see a duplicate.
- // It only cares about the distinction between 1 or 2,
- // so don't bother generating any more copies.
- fields = append(fields, fields[len(fields)-1])
- }
- continue
- }
-
- // Record new anonymous struct to explore in next round.
- nextCount[ft]++
- if nextCount[ft] == 1 {
- f := field{name: ft.Name(), index: index, typ: ft}
- next = append(next, f)
- }
- }
- }
- }
-
- sort.Sort(byName(fields))
-
- // Delete all fields that are hidden by the Go rules for embedded fields,
- // except that fields with TOML tags are promoted.
-
- // The fields are sorted in primary order of name, secondary order
- // of field index length. Loop over names; for each name, delete
- // hidden fields by choosing the one dominant field that survives.
- out := fields[:0]
- for advance, i := 0, 0; i < len(fields); i += advance {
- // One iteration per name.
- // Find the sequence of fields with the name of this first field.
- fi := fields[i]
- name := fi.name
- for advance = 1; i+advance < len(fields); advance++ {
- fj := fields[i+advance]
- if fj.name != name {
- break
- }
- }
- if advance == 1 { // Only one field with this name
- out = append(out, fi)
- continue
- }
- dominant, ok := dominantField(fields[i : i+advance])
- if ok {
- out = append(out, dominant)
- }
- }
-
- fields = out
- sort.Sort(byIndex(fields))
-
- return fields
-}
-
-// dominantField looks through the fields, all of which are known to
-// have the same name, to find the single field that dominates the
-// others using Go's embedding rules, modified by the presence of
-// TOML tags. If there are multiple top-level fields, the boolean
-// will be false: This condition is an error in Go and we skip all
-// the fields.
-func dominantField(fields []field) (field, bool) {
- // The fields are sorted in increasing index-length order. The winner
- // must therefore be one with the shortest index length. Drop all
- // longer entries, which is easy: just truncate the slice.
- length := len(fields[0].index)
- tagged := -1 // Index of first tagged field.
- for i, f := range fields {
- if len(f.index) > length {
- fields = fields[:i]
- break
- }
- if f.tag {
- if tagged >= 0 {
- // Multiple tagged fields at the same level: conflict.
- // Return no field.
- return field{}, false
- }
- tagged = i
- }
- }
- if tagged >= 0 {
- return fields[tagged], true
- }
- // All remaining fields have the same length. If there's more than one,
- // we have a conflict (two fields named "X" at the same level) and we
- // return no field.
- if len(fields) > 1 {
- return field{}, false
- }
- return fields[0], true
-}
-
-var fieldCache struct {
- sync.RWMutex
- m map[reflect.Type][]field
-}
-
-// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
-func cachedTypeFields(t reflect.Type) []field {
- fieldCache.RLock()
- f := fieldCache.m[t]
- fieldCache.RUnlock()
- if f != nil {
- return f
- }
-
- // Compute fields without lock.
- // Might duplicate effort but won't hold other computations back.
- f = typeFields(t)
- if f == nil {
- f = []field{}
- }
-
- fieldCache.Lock()
- if fieldCache.m == nil {
- fieldCache.m = map[reflect.Type][]field{}
- }
- fieldCache.m[t] = f
- fieldCache.Unlock()
- return f
-}
diff --git a/vendor/github.com/docker/spdystream/CONTRIBUTING.md b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
new file mode 100644
index 00000000000..d4eddcc5396
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/CONTRIBUTING.md
@@ -0,0 +1,13 @@
+# Contributing to SpdyStream
+
+Want to hack on spdystream? Awesome! Here are instructions to get you
+started.
+
+SpdyStream is a part of the [Docker](https://docker.io) project, and follows
+the same rules and principles. If you're already familiar with the way
+Docker does things, you'll feel right at home.
+
+Otherwise, go read
+[Docker's contributions guidelines](https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md).
+
+Happy hacking!
diff --git a/vendor/github.com/docker/spdystream/LICENSE b/vendor/github.com/docker/spdystream/LICENSE
new file mode 100644
index 00000000000..9e4bd4dbee9
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2014-2015 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/docker/spdystream/LICENSE.docs b/vendor/github.com/docker/spdystream/LICENSE.docs
new file mode 100644
index 00000000000..e26cd4fc8ed
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/LICENSE.docs
@@ -0,0 +1,425 @@
+Attribution-ShareAlike 4.0 International
+
+=======================================================================
+
+Creative Commons Corporation ("Creative Commons") is not a law firm and
+does not provide legal services or legal advice. Distribution of
+Creative Commons public licenses does not create a lawyer-client or
+other relationship. Creative Commons makes its licenses and related
+information available on an "as-is" basis. Creative Commons gives no
+warranties regarding its licenses, any material licensed under their
+terms and conditions, or any related information. Creative Commons
+disclaims all liability for damages resulting from their use to the
+fullest extent possible.
+
+Using Creative Commons Public Licenses
+
+Creative Commons public licenses provide a standard set of terms and
+conditions that creators and other rights holders may use to share
+original works of authorship and other material subject to copyright
+and certain other rights specified in the public license below. The
+following considerations are for informational purposes only, are not
+exhaustive, and do not form part of our licenses.
+
+ Considerations for licensors: Our public licenses are
+ intended for use by those authorized to give the public
+ permission to use material in ways otherwise restricted by
+ copyright and certain other rights. Our licenses are
+ irrevocable. Licensors should read and understand the terms
+ and conditions of the license they choose before applying it.
+ Licensors should also secure all rights necessary before
+ applying our licenses so that the public can reuse the
+ material as expected. Licensors should clearly mark any
+ material not subject to the license. This includes other CC-
+ licensed material, or material used under an exception or
+ limitation to copyright. More considerations for licensors:
+ wiki.creativecommons.org/Considerations_for_licensors
+
+ Considerations for the public: By using one of our public
+ licenses, a licensor grants the public permission to use the
+ licensed material under specified terms and conditions. If
+ the licensor's permission is not necessary for any reason--for
+ example, because of any applicable exception or limitation to
+ copyright--then that use is not regulated by the license. Our
+ licenses grant only permissions under copyright and certain
+ other rights that a licensor has authority to grant. Use of
+ the licensed material may still be restricted for other
+ reasons, including because others have copyright or other
+ rights in the material. A licensor may make special requests,
+ such as asking that all changes be marked or described.
+ Although not required by our licenses, you are encouraged to
+ respect those requests where reasonable. More_considerations
+ for the public:
+ wiki.creativecommons.org/Considerations_for_licensees
+
+=======================================================================
+
+Creative Commons Attribution-ShareAlike 4.0 International Public
+License
+
+By exercising the Licensed Rights (defined below), You accept and agree
+to be bound by the terms and conditions of this Creative Commons
+Attribution-ShareAlike 4.0 International Public License ("Public
+License"). To the extent this Public License may be interpreted as a
+contract, You are granted the Licensed Rights in consideration of Your
+acceptance of these terms and conditions, and the Licensor grants You
+such rights in consideration of benefits the Licensor receives from
+making the Licensed Material available under these terms and
+conditions.
+
+
+Section 1 -- Definitions.
+
+ a. Adapted Material means material subject to Copyright and Similar
+ Rights that is derived from or based upon the Licensed Material
+ and in which the Licensed Material is translated, altered,
+ arranged, transformed, or otherwise modified in a manner requiring
+ permission under the Copyright and Similar Rights held by the
+ Licensor. For purposes of this Public License, where the Licensed
+ Material is a musical work, performance, or sound recording,
+ Adapted Material is always produced where the Licensed Material is
+ synched in timed relation with a moving image.
+
+ b. Adapter's License means the license You apply to Your Copyright
+ and Similar Rights in Your contributions to Adapted Material in
+ accordance with the terms and conditions of this Public License.
+
+ c. BY-SA Compatible License means a license listed at
+ creativecommons.org/compatiblelicenses, approved by Creative
+ Commons as essentially the equivalent of this Public License.
+
+ d. Copyright and Similar Rights means copyright and/or similar rights
+ closely related to copyright including, without limitation,
+ performance, broadcast, sound recording, and Sui Generis Database
+ Rights, without regard to how the rights are labeled or
+ categorized. For purposes of this Public License, the rights
+ specified in Section 2(b)(1)-(2) are not Copyright and Similar
+ Rights.
+
+ e. Effective Technological Measures means those measures that, in the
+ absence of proper authority, may not be circumvented under laws
+ fulfilling obligations under Article 11 of the WIPO Copyright
+ Treaty adopted on December 20, 1996, and/or similar international
+ agreements.
+
+ f. Exceptions and Limitations means fair use, fair dealing, and/or
+ any other exception or limitation to Copyright and Similar Rights
+ that applies to Your use of the Licensed Material.
+
+ g. License Elements means the license attributes listed in the name
+ of a Creative Commons Public License. The License Elements of this
+ Public License are Attribution and ShareAlike.
+
+ h. Licensed Material means the artistic or literary work, database,
+ or other material to which the Licensor applied this Public
+ License.
+
+ i. Licensed Rights means the rights granted to You subject to the
+ terms and conditions of this Public License, which are limited to
+ all Copyright and Similar Rights that apply to Your use of the
+ Licensed Material and that the Licensor has authority to license.
+
+ j. Licensor means the individual(s) or entity(ies) granting rights
+ under this Public License.
+
+ k. Share means to provide material to the public by any means or
+ process that requires permission under the Licensed Rights, such
+ as reproduction, public display, public performance, distribution,
+ dissemination, communication, or importation, and to make material
+ available to the public including in ways that members of the
+ public may access the material from a place and at a time
+ individually chosen by them.
+
+ l. Sui Generis Database Rights means rights other than copyright
+ resulting from Directive 96/9/EC of the European Parliament and of
+ the Council of 11 March 1996 on the legal protection of databases,
+ as amended and/or succeeded, as well as other essentially
+ equivalent rights anywhere in the world.
+
+ m. You means the individual or entity exercising the Licensed Rights
+ under this Public License. Your has a corresponding meaning.
+
+
+Section 2 -- Scope.
+
+ a. License grant.
+
+ 1. Subject to the terms and conditions of this Public License,
+ the Licensor hereby grants You a worldwide, royalty-free,
+ non-sublicensable, non-exclusive, irrevocable license to
+ exercise the Licensed Rights in the Licensed Material to:
+
+ a. reproduce and Share the Licensed Material, in whole or
+ in part; and
+
+ b. produce, reproduce, and Share Adapted Material.
+
+ 2. Exceptions and Limitations. For the avoidance of doubt, where
+ Exceptions and Limitations apply to Your use, this Public
+ License does not apply, and You do not need to comply with
+ its terms and conditions.
+
+ 3. Term. The term of this Public License is specified in Section
+ 6(a).
+
+ 4. Media and formats; technical modifications allowed. The
+ Licensor authorizes You to exercise the Licensed Rights in
+ all media and formats whether now known or hereafter created,
+ and to make technical modifications necessary to do so. The
+ Licensor waives and/or agrees not to assert any right or
+ authority to forbid You from making technical modifications
+ necessary to exercise the Licensed Rights, including
+ technical modifications necessary to circumvent Effective
+ Technological Measures. For purposes of this Public License,
+ simply making modifications authorized by this Section 2(a)
+ (4) never produces Adapted Material.
+
+ 5. Downstream recipients.
+
+ a. Offer from the Licensor -- Licensed Material. Every
+ recipient of the Licensed Material automatically
+ receives an offer from the Licensor to exercise the
+ Licensed Rights under the terms and conditions of this
+ Public License.
+
+ b. Additional offer from the Licensor -- Adapted Material.
+ Every recipient of Adapted Material from You
+ automatically receives an offer from the Licensor to
+ exercise the Licensed Rights in the Adapted Material
+ under the conditions of the Adapter's License You apply.
+
+ c. No downstream restrictions. You may not offer or impose
+ any additional or different terms or conditions on, or
+ apply any Effective Technological Measures to, the
+ Licensed Material if doing so restricts exercise of the
+ Licensed Rights by any recipient of the Licensed
+ Material.
+
+ 6. No endorsement. Nothing in this Public License constitutes or
+ may be construed as permission to assert or imply that You
+ are, or that Your use of the Licensed Material is, connected
+ with, or sponsored, endorsed, or granted official status by,
+ the Licensor or others designated to receive attribution as
+ provided in Section 3(a)(1)(A)(i).
+
+ b. Other rights.
+
+ 1. Moral rights, such as the right of integrity, are not
+ licensed under this Public License, nor are publicity,
+ privacy, and/or other similar personality rights; however, to
+ the extent possible, the Licensor waives and/or agrees not to
+ assert any such rights held by the Licensor to the limited
+ extent necessary to allow You to exercise the Licensed
+ Rights, but not otherwise.
+
+ 2. Patent and trademark rights are not licensed under this
+ Public License.
+
+ 3. To the extent possible, the Licensor waives any right to
+ collect royalties from You for the exercise of the Licensed
+ Rights, whether directly or through a collecting society
+ under any voluntary or waivable statutory or compulsory
+ licensing scheme. In all other cases the Licensor expressly
+ reserves any right to collect such royalties.
+
+
+Section 3 -- License Conditions.
+
+Your exercise of the Licensed Rights is expressly made subject to the
+following conditions.
+
+ a. Attribution.
+
+ 1. If You Share the Licensed Material (including in modified
+ form), You must:
+
+ a. retain the following if it is supplied by the Licensor
+ with the Licensed Material:
+
+ i. identification of the creator(s) of the Licensed
+ Material and any others designated to receive
+ attribution, in any reasonable manner requested by
+ the Licensor (including by pseudonym if
+ designated);
+
+ ii. a copyright notice;
+
+ iii. a notice that refers to this Public License;
+
+ iv. a notice that refers to the disclaimer of
+ warranties;
+
+ v. a URI or hyperlink to the Licensed Material to the
+ extent reasonably practicable;
+
+ b. indicate if You modified the Licensed Material and
+ retain an indication of any previous modifications; and
+
+ c. indicate the Licensed Material is licensed under this
+ Public License, and include the text of, or the URI or
+ hyperlink to, this Public License.
+
+ 2. You may satisfy the conditions in Section 3(a)(1) in any
+ reasonable manner based on the medium, means, and context in
+ which You Share the Licensed Material. For example, it may be
+ reasonable to satisfy the conditions by providing a URI or
+ hyperlink to a resource that includes the required
+ information.
+
+ 3. If requested by the Licensor, You must remove any of the
+ information required by Section 3(a)(1)(A) to the extent
+ reasonably practicable.
+
+ b. ShareAlike.
+
+ In addition to the conditions in Section 3(a), if You Share
+ Adapted Material You produce, the following conditions also apply.
+
+ 1. The Adapter's License You apply must be a Creative Commons
+ license with the same License Elements, this version or
+ later, or a BY-SA Compatible License.
+
+ 2. You must include the text of, or the URI or hyperlink to, the
+ Adapter's License You apply. You may satisfy this condition
+ in any reasonable manner based on the medium, means, and
+ context in which You Share Adapted Material.
+
+ 3. You may not offer or impose any additional or different terms
+ or conditions on, or apply any Effective Technological
+ Measures to, Adapted Material that restrict exercise of the
+ rights granted under the Adapter's License You apply.
+
+
+Section 4 -- Sui Generis Database Rights.
+
+Where the Licensed Rights include Sui Generis Database Rights that
+apply to Your use of the Licensed Material:
+
+ a. for the avoidance of doubt, Section 2(a)(1) grants You the right
+ to extract, reuse, reproduce, and Share all or a substantial
+ portion of the contents of the database;
+
+ b. if You include all or a substantial portion of the database
+ contents in a database in which You have Sui Generis Database
+ Rights, then the database in which You have Sui Generis Database
+ Rights (but not its individual contents) is Adapted Material,
+
+ including for purposes of Section 3(b); and
+ c. You must comply with the conditions in Section 3(a) if You Share
+ all or a substantial portion of the contents of the database.
+
+For the avoidance of doubt, this Section 4 supplements and does not
+replace Your obligations under this Public License where the Licensed
+Rights include other Copyright and Similar Rights.
+
+
+Section 5 -- Disclaimer of Warranties and Limitation of Liability.
+
+ a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE
+ EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS
+ AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF
+ ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS,
+ IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION,
+ WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR
+ PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS,
+ ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT
+ KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT
+ ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU.
+
+ b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE
+ TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION,
+ NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT,
+ INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES,
+ COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR
+ USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN
+ ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR
+ DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR
+ IN PART, THIS LIMITATION MAY NOT APPLY TO YOU.
+
+ c. The disclaimer of warranties and limitation of liability provided
+ above shall be interpreted in a manner that, to the extent
+ possible, most closely approximates an absolute disclaimer and
+ waiver of all liability.
+
+
+Section 6 -- Term and Termination.
+
+ a. This Public License applies for the term of the Copyright and
+ Similar Rights licensed here. However, if You fail to comply with
+ this Public License, then Your rights under this Public License
+ terminate automatically.
+
+ b. Where Your right to use the Licensed Material has terminated under
+ Section 6(a), it reinstates:
+
+ 1. automatically as of the date the violation is cured, provided
+ it is cured within 30 days of Your discovery of the
+ violation; or
+
+ 2. upon express reinstatement by the Licensor.
+
+ For the avoidance of doubt, this Section 6(b) does not affect any
+ right the Licensor may have to seek remedies for Your violations
+ of this Public License.
+
+ c. For the avoidance of doubt, the Licensor may also offer the
+ Licensed Material under separate terms or conditions or stop
+ distributing the Licensed Material at any time; however, doing so
+ will not terminate this Public License.
+
+ d. Sections 1, 5, 6, 7, and 8 survive termination of this Public
+ License.
+
+
+Section 7 -- Other Terms and Conditions.
+
+ a. The Licensor shall not be bound by any additional or different
+ terms or conditions communicated by You unless expressly agreed.
+
+ b. Any arrangements, understandings, or agreements regarding the
+ Licensed Material not stated herein are separate from and
+ independent of the terms and conditions of this Public License.
+
+
+Section 8 -- Interpretation.
+
+ a. For the avoidance of doubt, this Public License does not, and
+ shall not be interpreted to, reduce, limit, restrict, or impose
+ conditions on any use of the Licensed Material that could lawfully
+ be made without permission under this Public License.
+
+ b. To the extent possible, if any provision of this Public License is
+ deemed unenforceable, it shall be automatically reformed to the
+ minimum extent necessary to make it enforceable. If the provision
+ cannot be reformed, it shall be severed from this Public License
+ without affecting the enforceability of the remaining terms and
+ conditions.
+
+ c. No term or condition of this Public License will be waived and no
+ failure to comply consented to unless expressly agreed to by the
+ Licensor.
+
+ d. Nothing in this Public License constitutes or may be interpreted
+ as a limitation upon, or waiver of, any privileges and immunities
+ that apply to the Licensor or You, including from the legal
+ processes of any jurisdiction or authority.
+
+
+=======================================================================
+
+Creative Commons is not a party to its public licenses.
+Notwithstanding, Creative Commons may elect to apply one of its public
+licenses to material it publishes and in those instances will be
+considered the "Licensor." Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/spdystream/MAINTAINERS b/vendor/github.com/docker/spdystream/MAINTAINERS
new file mode 100644
index 00000000000..14e263325c7
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/MAINTAINERS
@@ -0,0 +1,28 @@
+# Spdystream maintainers file
+#
+# This file describes who runs the docker/spdystream project and how.
+# This is a living document - if you see something out of date or missing, speak up!
+#
+# It is structured to be consumable by both humans and programs.
+# To extract its contents programmatically, use any TOML-compliant parser.
+#
+# This file is compiled into the MAINTAINERS file in docker/opensource.
+#
+[Org]
+ [Org."Core maintainers"]
+ people = [
+ "dmcgowan",
+ ]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+
+ # ADD YOURSELF HERE IN ALPHABETICAL ORDER
+
+ [people.dmcgowan]
+ Name = "Derek McGowan"
+ Email = "derek@docker.com"
+ GitHub = "dmcgowan"
diff --git a/vendor/github.com/docker/spdystream/README.md b/vendor/github.com/docker/spdystream/README.md
new file mode 100644
index 00000000000..11cccd0a09e
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/README.md
@@ -0,0 +1,77 @@
+# SpdyStream
+
+A multiplexed stream library using spdy
+
+## Usage
+
+Client example (connecting to mirroring server without auth)
+
+```go
+package main
+
+import (
+ "fmt"
+ "github.com/docker/spdystream"
+ "net"
+ "net/http"
+)
+
+func main() {
+ conn, err := net.Dial("tcp", "localhost:8080")
+ if err != nil {
+ panic(err)
+ }
+ spdyConn, err := spdystream.NewConnection(conn, false)
+ if err != nil {
+ panic(err)
+ }
+ go spdyConn.Serve(spdystream.NoOpStreamHandler)
+ stream, err := spdyConn.CreateStream(http.Header{}, nil, false)
+ if err != nil {
+ panic(err)
+ }
+
+ stream.Wait()
+
+ fmt.Fprint(stream, "Writing to stream")
+
+ buf := make([]byte, 25)
+ stream.Read(buf)
+ fmt.Println(string(buf))
+
+ stream.Close()
+}
+```
+
+Server example (mirroring server without auth)
+
+```go
+package main
+
+import (
+ "github.com/docker/spdystream"
+ "net"
+)
+
+func main() {
+ listener, err := net.Listen("tcp", "localhost:8080")
+ if err != nil {
+ panic(err)
+ }
+ for {
+ conn, err := listener.Accept()
+ if err != nil {
+ panic(err)
+ }
+ spdyConn, err := spdystream.NewConnection(conn, true)
+ if err != nil {
+ panic(err)
+ }
+ go spdyConn.Serve(spdystream.MirrorStreamHandler)
+ }
+}
+```
+
+## Copyright and license
+
+Copyright © 2014-2015 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/docker/spdystream/connection.go b/vendor/github.com/docker/spdystream/connection.go
new file mode 100644
index 00000000000..6031a0db1ab
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/connection.go
@@ -0,0 +1,958 @@
+package spdystream
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+var (
+ ErrInvalidStreamId = errors.New("Invalid stream id")
+ ErrTimeout = errors.New("Timeout occured")
+ ErrReset = errors.New("Stream reset")
+ ErrWriteClosedStream = errors.New("Write on closed stream")
+)
+
+const (
+ FRAME_WORKERS = 5
+ QUEUE_SIZE = 50
+)
+
+type StreamHandler func(stream *Stream)
+
+type AuthHandler func(header http.Header, slot uint8, parent uint32) bool
+
+type idleAwareFramer struct {
+ f *spdy.Framer
+ conn *Connection
+ writeLock sync.Mutex
+ resetChan chan struct{}
+ setTimeoutLock sync.Mutex
+ setTimeoutChan chan time.Duration
+ timeout time.Duration
+}
+
+func newIdleAwareFramer(framer *spdy.Framer) *idleAwareFramer {
+ iaf := &idleAwareFramer{
+ f: framer,
+ resetChan: make(chan struct{}, 2),
+ // setTimeoutChan needs to be buffered to avoid deadlocks when calling setIdleTimeout at about
+ // the same time the connection is being closed
+ setTimeoutChan: make(chan time.Duration, 1),
+ }
+ return iaf
+}
+
+func (i *idleAwareFramer) monitor() {
+ var (
+ timer *time.Timer
+ expired <-chan time.Time
+ resetChan = i.resetChan
+ setTimeoutChan = i.setTimeoutChan
+ )
+Loop:
+ for {
+ select {
+ case timeout := <-i.setTimeoutChan:
+ i.timeout = timeout
+ if timeout == 0 {
+ if timer != nil {
+ timer.Stop()
+ }
+ } else {
+ if timer == nil {
+ timer = time.NewTimer(timeout)
+ expired = timer.C
+ } else {
+ timer.Reset(timeout)
+ }
+ }
+ case <-resetChan:
+ if timer != nil && i.timeout > 0 {
+ timer.Reset(i.timeout)
+ }
+ case <-expired:
+ i.conn.streamCond.L.Lock()
+ streams := i.conn.streams
+ i.conn.streams = make(map[spdy.StreamId]*Stream)
+ i.conn.streamCond.Broadcast()
+ i.conn.streamCond.L.Unlock()
+ go func() {
+ for _, stream := range streams {
+ stream.resetStream()
+ }
+ i.conn.Close()
+ }()
+ case <-i.conn.closeChan:
+ if timer != nil {
+ timer.Stop()
+ }
+
+ // Start a goroutine to drain resetChan. This is needed because we've seen
+ // some unit tests with large numbers of goroutines get into a situation
+ // where resetChan fills up, at least 1 call to Write() is still trying to
+ // send to resetChan, the connection gets closed, and this case statement
+ // attempts to grab the write lock that Write() already has, causing a
+ // deadlock.
+ //
+ // See https://github.com/docker/spdystream/issues/49 for more details.
+ go func() {
+ for _ = range resetChan {
+ }
+ }()
+
+ go func() {
+ for _ = range setTimeoutChan {
+ }
+ }()
+
+ i.writeLock.Lock()
+ close(resetChan)
+ i.resetChan = nil
+ i.writeLock.Unlock()
+
+ i.setTimeoutLock.Lock()
+ close(i.setTimeoutChan)
+ i.setTimeoutChan = nil
+ i.setTimeoutLock.Unlock()
+
+ break Loop
+ }
+ }
+
+ // Drain resetChan
+ for _ = range resetChan {
+ }
+}
+
+func (i *idleAwareFramer) WriteFrame(frame spdy.Frame) error {
+ i.writeLock.Lock()
+ defer i.writeLock.Unlock()
+ if i.resetChan == nil {
+ return io.EOF
+ }
+ err := i.f.WriteFrame(frame)
+ if err != nil {
+ return err
+ }
+
+ i.resetChan <- struct{}{}
+
+ return nil
+}
+
+func (i *idleAwareFramer) ReadFrame() (spdy.Frame, error) {
+ frame, err := i.f.ReadFrame()
+ if err != nil {
+ return nil, err
+ }
+
+ // resetChan should never be closed since it is only closed
+ // when the connection has closed its closeChan. This closure
+ // only occurs after all Reads have finished
+ // TODO (dmcgowan): refactor relationship into connection
+ i.resetChan <- struct{}{}
+
+ return frame, nil
+}
+
+func (i *idleAwareFramer) setIdleTimeout(timeout time.Duration) {
+ i.setTimeoutLock.Lock()
+ defer i.setTimeoutLock.Unlock()
+
+ if i.setTimeoutChan == nil {
+ return
+ }
+
+ i.setTimeoutChan <- timeout
+}
+
+type Connection struct {
+ conn net.Conn
+ framer *idleAwareFramer
+
+ closeChan chan bool
+ goneAway bool
+ lastStreamChan chan<- *Stream
+ goAwayTimeout time.Duration
+ closeTimeout time.Duration
+
+ streamLock *sync.RWMutex
+ streamCond *sync.Cond
+ streams map[spdy.StreamId]*Stream
+
+ nextIdLock sync.Mutex
+ receiveIdLock sync.Mutex
+ nextStreamId spdy.StreamId
+ receivedStreamId spdy.StreamId
+
+ pingIdLock sync.Mutex
+ pingId uint32
+ pingChans map[uint32]chan error
+
+ shutdownLock sync.Mutex
+ shutdownChan chan error
+ hasShutdown bool
+
+ // for testing https://github.com/docker/spdystream/pull/56
+ dataFrameHandler func(*spdy.DataFrame) error
+}
+
+// NewConnection creates a new spdy connection from an existing
+// network connection.
+func NewConnection(conn net.Conn, server bool) (*Connection, error) {
+ framer, framerErr := spdy.NewFramer(conn, conn)
+ if framerErr != nil {
+ return nil, framerErr
+ }
+ idleAwareFramer := newIdleAwareFramer(framer)
+ var sid spdy.StreamId
+ var rid spdy.StreamId
+ var pid uint32
+ if server {
+ sid = 2
+ rid = 1
+ pid = 2
+ } else {
+ sid = 1
+ rid = 2
+ pid = 1
+ }
+
+ streamLock := new(sync.RWMutex)
+ streamCond := sync.NewCond(streamLock)
+
+ session := &Connection{
+ conn: conn,
+ framer: idleAwareFramer,
+
+ closeChan: make(chan bool),
+ goAwayTimeout: time.Duration(0),
+ closeTimeout: time.Duration(0),
+
+ streamLock: streamLock,
+ streamCond: streamCond,
+ streams: make(map[spdy.StreamId]*Stream),
+ nextStreamId: sid,
+ receivedStreamId: rid,
+
+ pingId: pid,
+ pingChans: make(map[uint32]chan error),
+
+ shutdownChan: make(chan error),
+ }
+ session.dataFrameHandler = session.handleDataFrame
+ idleAwareFramer.conn = session
+ go idleAwareFramer.monitor()
+
+ return session, nil
+}
+
+// Ping sends a ping frame across the connection and
+// returns the response time
+func (s *Connection) Ping() (time.Duration, error) {
+ pid := s.pingId
+ s.pingIdLock.Lock()
+ if s.pingId > 0x7ffffffe {
+ s.pingId = s.pingId - 0x7ffffffe
+ } else {
+ s.pingId = s.pingId + 2
+ }
+ s.pingIdLock.Unlock()
+ pingChan := make(chan error)
+ s.pingChans[pid] = pingChan
+ defer delete(s.pingChans, pid)
+
+ frame := &spdy.PingFrame{Id: pid}
+ startTime := time.Now()
+ writeErr := s.framer.WriteFrame(frame)
+ if writeErr != nil {
+ return time.Duration(0), writeErr
+ }
+ select {
+ case <-s.closeChan:
+ return time.Duration(0), errors.New("connection closed")
+ case err, ok := <-pingChan:
+ if ok && err != nil {
+ return time.Duration(0), err
+ }
+ break
+ }
+ return time.Now().Sub(startTime), nil
+}
+
+// Serve handles frames sent from the server, including reply frames
+// which are needed to fully initiate connections. Both clients and servers
+// should call Serve in a separate goroutine before creating streams.
+func (s *Connection) Serve(newHandler StreamHandler) {
+ // use a WaitGroup to wait for all frames to be drained after receiving
+ // go-away.
+ var wg sync.WaitGroup
+
+	// Partition queues to ensure stream frames are handled
+ // by the same worker, ensuring order is maintained
+ frameQueues := make([]*PriorityFrameQueue, FRAME_WORKERS)
+ for i := 0; i < FRAME_WORKERS; i++ {
+ frameQueues[i] = NewPriorityFrameQueue(QUEUE_SIZE)
+
+ // Ensure frame queue is drained when connection is closed
+ go func(frameQueue *PriorityFrameQueue) {
+ <-s.closeChan
+ frameQueue.Drain()
+ }(frameQueues[i])
+
+ wg.Add(1)
+ go func(frameQueue *PriorityFrameQueue) {
+ // let the WaitGroup know this worker is done
+ defer wg.Done()
+
+ s.frameHandler(frameQueue, newHandler)
+ }(frameQueues[i])
+ }
+
+ var (
+ partitionRoundRobin int
+ goAwayFrame *spdy.GoAwayFrame
+ )
+Loop:
+ for {
+ readFrame, err := s.framer.ReadFrame()
+ if err != nil {
+ if err != io.EOF {
+ fmt.Errorf("frame read error: %s", err)
+ } else {
+ debugMessage("(%p) EOF received", s)
+ }
+ break
+ }
+ var priority uint8
+ var partition int
+ switch frame := readFrame.(type) {
+ case *spdy.SynStreamFrame:
+ if s.checkStreamFrame(frame) {
+ priority = frame.Priority
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ debugMessage("(%p) Add stream frame: %d ", s, frame.StreamId)
+ s.addStreamFrame(frame)
+ } else {
+ debugMessage("(%p) Rejected stream frame: %d ", s, frame.StreamId)
+ continue
+ }
+ case *spdy.SynReplyFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.DataFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.RstStreamFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.HeadersFrame:
+ priority = s.getStreamPriority(frame.StreamId)
+ partition = int(frame.StreamId % FRAME_WORKERS)
+ case *spdy.PingFrame:
+ priority = 0
+ partition = partitionRoundRobin
+ partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+ case *spdy.GoAwayFrame:
+ // hold on to the go away frame and exit the loop
+ goAwayFrame = frame
+ break Loop
+ default:
+ priority = 7
+ partition = partitionRoundRobin
+ partitionRoundRobin = (partitionRoundRobin + 1) % FRAME_WORKERS
+ }
+ frameQueues[partition].Push(readFrame, priority)
+ }
+ close(s.closeChan)
+
+ // wait for all frame handler workers to indicate they've drained their queues
+ // before handling the go away frame
+ wg.Wait()
+
+ if goAwayFrame != nil {
+ s.handleGoAwayFrame(goAwayFrame)
+ }
+
+ // now it's safe to close remote channels and empty s.streams
+ s.streamCond.L.Lock()
+ // notify streams that they're now closed, which will
+ // unblock any stream Read() calls
+ for _, stream := range s.streams {
+ stream.closeRemoteChannels()
+ }
+ s.streams = make(map[spdy.StreamId]*Stream)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) frameHandler(frameQueue *PriorityFrameQueue, newHandler StreamHandler) {
+ for {
+ popFrame := frameQueue.Pop()
+ if popFrame == nil {
+ return
+ }
+
+ var frameErr error
+ switch frame := popFrame.(type) {
+ case *spdy.SynStreamFrame:
+ frameErr = s.handleStreamFrame(frame, newHandler)
+ case *spdy.SynReplyFrame:
+ frameErr = s.handleReplyFrame(frame)
+ case *spdy.DataFrame:
+ frameErr = s.dataFrameHandler(frame)
+ case *spdy.RstStreamFrame:
+ frameErr = s.handleResetFrame(frame)
+ case *spdy.HeadersFrame:
+ frameErr = s.handleHeaderFrame(frame)
+ case *spdy.PingFrame:
+ frameErr = s.handlePingFrame(frame)
+ case *spdy.GoAwayFrame:
+ frameErr = s.handleGoAwayFrame(frame)
+ default:
+ frameErr = fmt.Errorf("unhandled frame type: %T", frame)
+ }
+
+ if frameErr != nil {
+ fmt.Errorf("frame handling error: %s", frameErr)
+ }
+ }
+}
+
+func (s *Connection) getStreamPriority(streamId spdy.StreamId) uint8 {
+ stream, streamOk := s.getStream(streamId)
+ if !streamOk {
+ return 7
+ }
+ return stream.priority
+}
+
+func (s *Connection) addStreamFrame(frame *spdy.SynStreamFrame) {
+ var parent *Stream
+ if frame.AssociatedToStreamId != spdy.StreamId(0) {
+ parent, _ = s.getStream(frame.AssociatedToStreamId)
+ }
+
+ stream := &Stream{
+ streamId: frame.StreamId,
+ parent: parent,
+ conn: s,
+ startChan: make(chan error),
+ headers: frame.Headers,
+ finished: (frame.CFHeader.Flags & spdy.ControlFlagUnidirectional) != 0x00,
+ replyCond: sync.NewCond(new(sync.Mutex)),
+ dataChan: make(chan []byte),
+ headerChan: make(chan http.Header),
+ closeChan: make(chan bool),
+ }
+ if frame.CFHeader.Flags&spdy.ControlFlagFin != 0x00 {
+ stream.closeRemoteChannels()
+ }
+
+ s.addStream(stream)
+}
+
+// checkStreamFrame checks to see if a stream frame is allowed.
+// If the stream is invalid, then a reset frame with protocol error
+// will be returned.
+func (s *Connection) checkStreamFrame(frame *spdy.SynStreamFrame) bool {
+ s.receiveIdLock.Lock()
+ defer s.receiveIdLock.Unlock()
+ if s.goneAway {
+ return false
+ }
+ validationErr := s.validateStreamId(frame.StreamId)
+ if validationErr != nil {
+ go func() {
+ resetErr := s.sendResetFrame(spdy.ProtocolError, frame.StreamId)
+ if resetErr != nil {
+ fmt.Errorf("reset error: %s", resetErr)
+ }
+ }()
+ return false
+ }
+ return true
+}
+
+func (s *Connection) handleStreamFrame(frame *spdy.SynStreamFrame, newHandler StreamHandler) error {
+ stream, ok := s.getStream(frame.StreamId)
+ if !ok {
+ return fmt.Errorf("Missing stream: %d", frame.StreamId)
+ }
+
+ newHandler(stream)
+
+ return nil
+}
+
+func (s *Connection) handleReplyFrame(frame *spdy.SynReplyFrame) error {
+ debugMessage("(%p) Reply frame received for %d", s, frame.StreamId)
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ debugMessage("Reply frame gone away for %d", frame.StreamId)
+ // Stream has already gone away
+ return nil
+ }
+ if stream.replied {
+ // Stream has already received reply
+ return nil
+ }
+ stream.replied = true
+
+ // TODO Check for error
+ if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+
+ close(stream.startChan)
+
+ return nil
+}
+
+func (s *Connection) handleResetFrame(frame *spdy.RstStreamFrame) error {
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ // Stream has already been removed
+ return nil
+ }
+ s.removeStream(stream)
+ stream.closeRemoteChannels()
+
+ if !stream.replied {
+ stream.replied = true
+ stream.startChan <- ErrReset
+ close(stream.startChan)
+ }
+
+ stream.finishLock.Lock()
+ stream.finished = true
+ stream.finishLock.Unlock()
+
+ return nil
+}
+
+func (s *Connection) handleHeaderFrame(frame *spdy.HeadersFrame) error {
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ // Stream has already gone away
+ return nil
+ }
+ if !stream.replied {
+ // No reply received...Protocol error?
+ return nil
+ }
+
+ // TODO limit headers while not blocking (use buffered chan or goroutine?)
+ select {
+ case <-stream.closeChan:
+ return nil
+ case stream.headerChan <- frame.Headers:
+ }
+
+ if (frame.CFHeader.Flags & spdy.ControlFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+
+ return nil
+}
+
+func (s *Connection) handleDataFrame(frame *spdy.DataFrame) error {
+ debugMessage("(%p) Data frame received for %d", s, frame.StreamId)
+ stream, streamOk := s.getStream(frame.StreamId)
+ if !streamOk {
+ debugMessage("(%p) Data frame gone away for %d", s, frame.StreamId)
+ // Stream has already gone away
+ return nil
+ }
+ if !stream.replied {
+ debugMessage("(%p) Data frame not replied %d", s, frame.StreamId)
+ // No reply received...Protocol error?
+ return nil
+ }
+
+ debugMessage("(%p) (%d) Data frame handling", stream, stream.streamId)
+ if len(frame.Data) > 0 {
+ stream.dataLock.RLock()
+ select {
+ case <-stream.closeChan:
+ debugMessage("(%p) (%d) Data frame not sent (stream shut down)", stream, stream.streamId)
+ case stream.dataChan <- frame.Data:
+ debugMessage("(%p) (%d) Data frame sent", stream, stream.streamId)
+ }
+ stream.dataLock.RUnlock()
+ }
+ if (frame.Flags & spdy.DataFlagFin) != 0x00 {
+ s.remoteStreamFinish(stream)
+ }
+ return nil
+}
+
+func (s *Connection) handlePingFrame(frame *spdy.PingFrame) error {
+ if s.pingId&0x01 != frame.Id&0x01 {
+ return s.framer.WriteFrame(frame)
+ }
+ pingChan, pingOk := s.pingChans[frame.Id]
+ if pingOk {
+ close(pingChan)
+ }
+ return nil
+}
+
+func (s *Connection) handleGoAwayFrame(frame *spdy.GoAwayFrame) error {
+ debugMessage("(%p) Go away received", s)
+ s.receiveIdLock.Lock()
+ if s.goneAway {
+ s.receiveIdLock.Unlock()
+ return nil
+ }
+ s.goneAway = true
+ s.receiveIdLock.Unlock()
+
+ if s.lastStreamChan != nil {
+ stream, _ := s.getStream(frame.LastGoodStreamId)
+ go func() {
+ s.lastStreamChan <- stream
+ }()
+ }
+
+ // Do not block frame handler waiting for closure
+ go s.shutdown(s.goAwayTimeout)
+
+ return nil
+}
+
+func (s *Connection) remoteStreamFinish(stream *Stream) {
+ stream.closeRemoteChannels()
+
+ stream.finishLock.Lock()
+ if stream.finished {
+ // Stream is fully closed, cleanup
+ s.removeStream(stream)
+ }
+ stream.finishLock.Unlock()
+}
+
+// CreateStream creates a new spdy stream using the parameters for
+// creating the stream frame. The stream frame will be sent upon
+// calling this function, however this function does not wait for
+// the reply frame. If waiting for the reply is desired, use
+// the stream Wait or WaitTimeout function on the stream returned
+// by this function.
+func (s *Connection) CreateStream(headers http.Header, parent *Stream, fin bool) (*Stream, error) {
+ // MUST synchronize stream creation (all the way to writing the frame)
+ // as stream IDs **MUST** increase monotonically.
+ s.nextIdLock.Lock()
+ defer s.nextIdLock.Unlock()
+
+ streamId := s.getNextStreamId()
+ if streamId == 0 {
+ return nil, fmt.Errorf("Unable to get new stream id")
+ }
+
+ stream := &Stream{
+ streamId: streamId,
+ parent: parent,
+ conn: s,
+ startChan: make(chan error),
+ headers: headers,
+ dataChan: make(chan []byte),
+ headerChan: make(chan http.Header),
+ closeChan: make(chan bool),
+ }
+
+ debugMessage("(%p) (%p) Create stream", s, stream)
+
+ s.addStream(stream)
+
+ return stream, s.sendStream(stream, fin)
+}
+
+func (s *Connection) shutdown(closeTimeout time.Duration) {
+ // TODO Ensure this isn't called multiple times
+ s.shutdownLock.Lock()
+ if s.hasShutdown {
+ s.shutdownLock.Unlock()
+ return
+ }
+ s.hasShutdown = true
+ s.shutdownLock.Unlock()
+
+ var timeout <-chan time.Time
+ if closeTimeout > time.Duration(0) {
+ timeout = time.After(closeTimeout)
+ }
+ streamsClosed := make(chan bool)
+
+ go func() {
+ s.streamCond.L.Lock()
+ for len(s.streams) > 0 {
+ debugMessage("Streams opened: %d, %#v", len(s.streams), s.streams)
+ s.streamCond.Wait()
+ }
+ s.streamCond.L.Unlock()
+ close(streamsClosed)
+ }()
+
+ var err error
+ select {
+ case <-streamsClosed:
+ // No active streams, close should be safe
+ err = s.conn.Close()
+ case <-timeout:
+ // Force ungraceful close
+ err = s.conn.Close()
+ // Wait for cleanup to clear active streams
+ <-streamsClosed
+ }
+
+ if err != nil {
+ duration := 10 * time.Minute
+ time.AfterFunc(duration, func() {
+ select {
+ case err, ok := <-s.shutdownChan:
+ if ok {
+ fmt.Errorf("Unhandled close error after %s: %s", duration, err)
+ }
+ default:
+ }
+ })
+ s.shutdownChan <- err
+ }
+ close(s.shutdownChan)
+
+ return
+}
+
+// Closes spdy connection by sending GoAway frame and initiating shutdown
+func (s *Connection) Close() error {
+ s.receiveIdLock.Lock()
+ if s.goneAway {
+ s.receiveIdLock.Unlock()
+ return nil
+ }
+ s.goneAway = true
+ s.receiveIdLock.Unlock()
+
+ var lastStreamId spdy.StreamId
+ if s.receivedStreamId > 2 {
+ lastStreamId = s.receivedStreamId - 2
+ }
+
+ goAwayFrame := &spdy.GoAwayFrame{
+ LastGoodStreamId: lastStreamId,
+ Status: spdy.GoAwayOK,
+ }
+
+ err := s.framer.WriteFrame(goAwayFrame)
+ if err != nil {
+ return err
+ }
+
+ go s.shutdown(s.closeTimeout)
+
+ return nil
+}
+
+// CloseWait closes the connection and waits for shutdown
+// to finish. Note the underlying network Connection
+// is not closed until the end of shutdown.
+func (s *Connection) CloseWait() error {
+ closeErr := s.Close()
+ if closeErr != nil {
+ return closeErr
+ }
+ shutdownErr, ok := <-s.shutdownChan
+ if ok {
+ return shutdownErr
+ }
+ return nil
+}
+
+// Wait waits for the connection to finish shutdown or for
+// the wait timeout duration to expire. This needs to be
+// called either after Close has been called or the GOAWAY frame
+// has been received. If the wait timeout is 0, this function
+// will block until shutdown finishes. If wait is never called
+// and a shutdown error occurs, that error will be logged as an
+// unhandled error.
+func (s *Connection) Wait(waitTimeout time.Duration) error {
+ var timeout <-chan time.Time
+ if waitTimeout > time.Duration(0) {
+ timeout = time.After(waitTimeout)
+ }
+
+ select {
+ case err, ok := <-s.shutdownChan:
+ if ok {
+ return err
+ }
+ case <-timeout:
+ return ErrTimeout
+ }
+ return nil
+}
+
+// NotifyClose registers a channel to be called when the remote
+// peer indicates connection closure. The last stream to be
+// received by the remote will be sent on the channel. The notify
+// timeout will determine the duration between go away received
+// and the connection being closed.
+func (s *Connection) NotifyClose(c chan<- *Stream, timeout time.Duration) {
+ s.goAwayTimeout = timeout
+ s.lastStreamChan = c
+}
+
+// SetCloseTimeout sets the amount of time close will wait for
+// streams to finish before terminating the underlying network
+// connection. Setting the timeout to 0 will cause close to
+// wait forever, which is the default.
+func (s *Connection) SetCloseTimeout(timeout time.Duration) {
+ s.closeTimeout = timeout
+}
+
+// SetIdleTimeout sets the amount of time the connection may sit idle before
+// it is forcefully terminated.
+func (s *Connection) SetIdleTimeout(timeout time.Duration) {
+ s.framer.setIdleTimeout(timeout)
+}
+
+func (s *Connection) sendHeaders(headers http.Header, stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ }
+
+ headerFrame := &spdy.HeadersFrame{
+ StreamId: stream.streamId,
+ Headers: headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(headerFrame)
+}
+
+func (s *Connection) sendReply(headers http.Header, stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ }
+
+ replyFrame := &spdy.SynReplyFrame{
+ StreamId: stream.streamId,
+ Headers: headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(replyFrame)
+}
+
+func (s *Connection) sendResetFrame(status spdy.RstStreamStatus, streamId spdy.StreamId) error {
+ resetFrame := &spdy.RstStreamFrame{
+ StreamId: streamId,
+ Status: status,
+ }
+
+ return s.framer.WriteFrame(resetFrame)
+}
+
+func (s *Connection) sendReset(status spdy.RstStreamStatus, stream *Stream) error {
+ return s.sendResetFrame(status, stream.streamId)
+}
+
+func (s *Connection) sendStream(stream *Stream, fin bool) error {
+ var flags spdy.ControlFlags
+ if fin {
+ flags = spdy.ControlFlagFin
+ stream.finished = true
+ }
+
+ var parentId spdy.StreamId
+ if stream.parent != nil {
+ parentId = stream.parent.streamId
+ }
+
+ streamFrame := &spdy.SynStreamFrame{
+ StreamId: spdy.StreamId(stream.streamId),
+ AssociatedToStreamId: spdy.StreamId(parentId),
+ Headers: stream.headers,
+ CFHeader: spdy.ControlFrameHeader{Flags: flags},
+ }
+
+ return s.framer.WriteFrame(streamFrame)
+}
+
+// getNextStreamId returns the next sequential id
+// every call should produce a unique value or 0 when ids are exhausted
+func (s *Connection) getNextStreamId() spdy.StreamId {
+ sid := s.nextStreamId
+ if sid > 0x7fffffff {
+ return 0
+ }
+ s.nextStreamId = s.nextStreamId + 2
+ return sid
+}
+
+// PeekNextStreamId returns the next sequential id and keeps the next id untouched
+func (s *Connection) PeekNextStreamId() spdy.StreamId {
+ sid := s.nextStreamId
+ return sid
+}
+
+func (s *Connection) validateStreamId(rid spdy.StreamId) error {
+ if rid > 0x7fffffff || rid < s.receivedStreamId {
+ return ErrInvalidStreamId
+ }
+ s.receivedStreamId = rid + 2
+ return nil
+}
+
+func (s *Connection) addStream(stream *Stream) {
+ s.streamCond.L.Lock()
+ s.streams[stream.streamId] = stream
+ debugMessage("(%p) (%p) Stream added, broadcasting: %d", s, stream, stream.streamId)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) removeStream(stream *Stream) {
+ s.streamCond.L.Lock()
+ delete(s.streams, stream.streamId)
+ debugMessage("(%p) (%p) Stream removed, broadcasting: %d", s, stream, stream.streamId)
+ s.streamCond.Broadcast()
+ s.streamCond.L.Unlock()
+}
+
+func (s *Connection) getStream(streamId spdy.StreamId) (stream *Stream, ok bool) {
+ s.streamLock.RLock()
+ stream, ok = s.streams[streamId]
+ s.streamLock.RUnlock()
+ return
+}
+
+// FindStream looks up the given stream id and either waits for the
+// stream to be found or returns nil if the stream id is no longer
+// valid.
+func (s *Connection) FindStream(streamId uint32) *Stream {
+ var stream *Stream
+ var ok bool
+ s.streamCond.L.Lock()
+ stream, ok = s.streams[spdy.StreamId(streamId)]
+ debugMessage("(%p) Found stream %d? %t", s, spdy.StreamId(streamId), ok)
+ for !ok && streamId >= uint32(s.receivedStreamId) {
+ s.streamCond.Wait()
+ stream, ok = s.streams[spdy.StreamId(streamId)]
+ }
+ s.streamCond.L.Unlock()
+ return stream
+}
+
+func (s *Connection) CloseChan() <-chan bool {
+ return s.closeChan
+}
diff --git a/vendor/github.com/docker/spdystream/handlers.go b/vendor/github.com/docker/spdystream/handlers.go
new file mode 100644
index 00000000000..b59fa5fdcd0
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/handlers.go
@@ -0,0 +1,38 @@
+package spdystream
+
+import (
+ "io"
+ "net/http"
+)
+
+// MirrorStreamHandler mirrors all streams.
+func MirrorStreamHandler(stream *Stream) {
+ replyErr := stream.SendReply(http.Header{}, false)
+ if replyErr != nil {
+ return
+ }
+
+ go func() {
+ io.Copy(stream, stream)
+ stream.Close()
+ }()
+ go func() {
+ for {
+ header, receiveErr := stream.ReceiveHeader()
+ if receiveErr != nil {
+ return
+ }
+ sendErr := stream.SendHeader(header, false)
+ if sendErr != nil {
+ return
+ }
+ }
+ }()
+}
+
+// NoOpStreamHandler does nothing when stream connects, most
+// likely used with RejectAuthHandler which will not allow any
+// streams to make it to the stream handler.
+func NoOpStreamHandler(stream *Stream) {
+ stream.SendReply(http.Header{}, false)
+}
diff --git a/vendor/github.com/docker/spdystream/priority.go b/vendor/github.com/docker/spdystream/priority.go
new file mode 100644
index 00000000000..fc8582b5c6f
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/priority.go
@@ -0,0 +1,98 @@
+package spdystream
+
+import (
+ "container/heap"
+ "sync"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+type prioritizedFrame struct {
+ frame spdy.Frame
+ priority uint8
+ insertId uint64
+}
+
+type frameQueue []*prioritizedFrame
+
+func (fq frameQueue) Len() int {
+ return len(fq)
+}
+
+func (fq frameQueue) Less(i, j int) bool {
+ if fq[i].priority == fq[j].priority {
+ return fq[i].insertId < fq[j].insertId
+ }
+ return fq[i].priority < fq[j].priority
+}
+
+func (fq frameQueue) Swap(i, j int) {
+ fq[i], fq[j] = fq[j], fq[i]
+}
+
+func (fq *frameQueue) Push(x interface{}) {
+ *fq = append(*fq, x.(*prioritizedFrame))
+}
+
+func (fq *frameQueue) Pop() interface{} {
+ old := *fq
+ n := len(old)
+ *fq = old[0 : n-1]
+ return old[n-1]
+}
+
+type PriorityFrameQueue struct {
+ queue *frameQueue
+ c *sync.Cond
+ size int
+ nextInsertId uint64
+ drain bool
+}
+
+func NewPriorityFrameQueue(size int) *PriorityFrameQueue {
+ queue := make(frameQueue, 0, size)
+ heap.Init(&queue)
+
+ return &PriorityFrameQueue{
+ queue: &queue,
+ size: size,
+ c: sync.NewCond(&sync.Mutex{}),
+ }
+}
+
+func (q *PriorityFrameQueue) Push(frame spdy.Frame, priority uint8) {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ for q.queue.Len() >= q.size {
+ q.c.Wait()
+ }
+ pFrame := &prioritizedFrame{
+ frame: frame,
+ priority: priority,
+ insertId: q.nextInsertId,
+ }
+ q.nextInsertId = q.nextInsertId + 1
+ heap.Push(q.queue, pFrame)
+ q.c.Signal()
+}
+
+func (q *PriorityFrameQueue) Pop() spdy.Frame {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ for q.queue.Len() == 0 {
+ if q.drain {
+ return nil
+ }
+ q.c.Wait()
+ }
+ frame := heap.Pop(q.queue).(*prioritizedFrame).frame
+ q.c.Signal()
+ return frame
+}
+
+func (q *PriorityFrameQueue) Drain() {
+ q.c.L.Lock()
+ defer q.c.L.Unlock()
+ q.drain = true
+ q.c.Broadcast()
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/dictionary.go b/vendor/github.com/docker/spdystream/spdy/dictionary.go
new file mode 100644
index 00000000000..5a5ff0e14cd
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/dictionary.go
@@ -0,0 +1,187 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+// headerDictionary is the dictionary sent to the zlib compressor/decompressor.
+var headerDictionary = []byte{
+ 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69,
+ 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68,
+ 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70,
+ 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70,
+ 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65,
+ 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05,
+ 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00,
+ 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00,
+ 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70,
+ 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+ 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63,
+ 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f,
+ 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c,
+ 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00,
+ 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70,
+ 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73,
+ 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00,
+ 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77,
+ 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f,
+ 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63,
+ 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72,
+ 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f,
+ 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
+ 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65,
+ 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f,
+ 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10,
+ 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d,
+ 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65,
+ 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74,
+ 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67,
+ 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f,
+ 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+ 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00,
+ 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74,
+ 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00,
+ 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e,
+ 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00,
+ 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00,
+ 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00,
+ 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74,
+ 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69,
+ 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66,
+ 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68,
+ 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69,
+ 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00,
+ 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f,
+ 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73,
+ 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d,
+ 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d,
+ 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00,
+ 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67,
+ 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d,
+ 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69,
+ 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65,
+ 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74,
+ 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65,
+ 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00,
+ 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72,
+ 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00,
+ 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00,
+ 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74,
+ 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00,
+ 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61,
+ 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05,
+ 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00,
+ 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72,
+ 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72,
+ 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00,
+ 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65,
+ 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00,
+ 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c,
+ 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72,
+ 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65,
+ 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00,
+ 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61,
+ 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73,
+ 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74,
+ 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79,
+ 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00,
+ 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69,
+ 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77,
+ 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e,
+ 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00,
+ 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00,
+ 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75,
+ 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30,
+ 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76,
+ 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00,
+ 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31,
+ 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72,
+ 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62,
+ 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73,
+ 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69,
+ 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65,
+ 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00,
+ 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69,
+ 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32,
+ 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35,
+ 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30,
+ 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33,
+ 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37,
+ 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30,
+ 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34,
+ 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31,
+ 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31,
+ 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34,
+ 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34,
+ 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e,
+ 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f,
+ 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65,
+ 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20,
+ 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65,
+ 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f,
+ 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d,
+ 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34,
+ 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52,
+ 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30,
+ 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68,
+ 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30,
+ 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64,
+ 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e,
+ 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64,
+ 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65,
+ 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72,
+ 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f,
+ 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74,
+ 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20,
+ 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20,
+ 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61,
+ 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46,
+ 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41,
+ 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a,
+ 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41,
+ 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20,
+ 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20,
+ 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30,
+ 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e,
+ 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57,
+ 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c,
+ 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61,
+ 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20,
+ 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b,
+ 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f,
+ 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61,
+ 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69,
+ 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67,
+ 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67,
+ 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+ 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69,
+ 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78,
+ 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c,
+ 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c,
+ 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74,
+ 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72,
+ 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c,
+ 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74,
+ 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65,
+ 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65,
+ 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64,
+ 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65,
+ 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63,
+ 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69,
+ 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d,
+ 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a,
+ 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e,
+}
diff --git a/vendor/github.com/docker/spdystream/spdy/read.go b/vendor/github.com/docker/spdystream/spdy/read.go
new file mode 100644
index 00000000000..9359a95015c
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/read.go
@@ -0,0 +1,348 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+ "compress/zlib"
+ "encoding/binary"
+ "io"
+ "net/http"
+ "strings"
+)
+
// read parses a SYN_STREAM frame body; it delegates to the Framer, which
// owns the shared header decompressor state.
func (frame *SynStreamFrame) read(h ControlFrameHeader, f *Framer) error {
	return f.readSynStreamFrame(h, frame)
}

// read parses a SYN_REPLY frame body; it delegates to the Framer, which
// owns the shared header decompressor state.
func (frame *SynReplyFrame) read(h ControlFrameHeader, f *Framer) error {
	return f.readSynReplyFrame(h, frame)
}
+
// read parses a RST_STREAM body: a 4-byte stream id followed by a 4-byte
// status code. A zero status or a zero stream id is rejected.
func (frame *RstStreamFrame) read(h ControlFrameHeader, f *Framer) error {
	frame.CFHeader = h
	if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
		return err
	}
	if frame.Status == 0 {
		return &Error{InvalidControlFrame, frame.StreamId}
	}
	if frame.StreamId == 0 {
		return &Error{ZeroStreamId, 0}
	}
	return nil
}
+
+func (frame *SettingsFrame) read(h ControlFrameHeader, f *Framer) error {
+ frame.CFHeader = h
+ var numSettings uint32
+ if err := binary.Read(f.r, binary.BigEndian, &numSettings); err != nil {
+ return err
+ }
+ frame.FlagIdValues = make([]SettingsFlagIdValue, numSettings)
+ for i := uint32(0); i < numSettings; i++ {
+ if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Id); err != nil {
+ return err
+ }
+ frame.FlagIdValues[i].Flag = SettingsFlag((frame.FlagIdValues[i].Id & 0xff000000) >> 24)
+ frame.FlagIdValues[i].Id &= 0xffffff
+ if err := binary.Read(f.r, binary.BigEndian, &frame.FlagIdValues[i].Value); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
// read parses a PING body (a single 4-byte ping id). A zero id is
// invalid, and PING defines no flags, so any set flag is rejected.
func (frame *PingFrame) read(h ControlFrameHeader, f *Framer) error {
	frame.CFHeader = h
	if err := binary.Read(f.r, binary.BigEndian, &frame.Id); err != nil {
		return err
	}
	if frame.Id == 0 {
		return &Error{ZeroStreamId, 0}
	}
	if frame.CFHeader.Flags != 0 {
		return &Error{InvalidControlFrame, StreamId(frame.Id)}
	}
	return nil
}
+
// read parses a GOAWAY body: last-good stream id then status. GOAWAY
// defines no flags and its body is exactly 8 bytes; anything else is
// rejected as an invalid control frame.
func (frame *GoAwayFrame) read(h ControlFrameHeader, f *Framer) error {
	frame.CFHeader = h
	if err := binary.Read(f.r, binary.BigEndian, &frame.LastGoodStreamId); err != nil {
		return err
	}
	if frame.CFHeader.Flags != 0 {
		return &Error{InvalidControlFrame, frame.LastGoodStreamId}
	}
	if frame.CFHeader.length != 8 {
		return &Error{InvalidControlFrame, frame.LastGoodStreamId}
	}
	if err := binary.Read(f.r, binary.BigEndian, &frame.Status); err != nil {
		return err
	}
	return nil
}
+
// read parses a HEADERS frame body; it delegates to the Framer, which owns
// the shared header decompressor state.
func (frame *HeadersFrame) read(h ControlFrameHeader, f *Framer) error {
	return f.readHeadersFrame(h, frame)
}
+
// read parses a WINDOW_UPDATE body: stream id then delta-window-size.
// WINDOW_UPDATE defines no flags and its body is exactly 8 bytes.
func (frame *WindowUpdateFrame) read(h ControlFrameHeader, f *Framer) error {
	frame.CFHeader = h
	if err := binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	if frame.CFHeader.Flags != 0 {
		return &Error{InvalidControlFrame, frame.StreamId}
	}
	if frame.CFHeader.length != 8 {
		return &Error{InvalidControlFrame, frame.StreamId}
	}
	if err := binary.Read(f.r, binary.BigEndian, &frame.DeltaWindowSize); err != nil {
		return err
	}
	return nil
}
+
// newControlFrame returns an empty frame of the concrete type registered
// for frameType, or an InvalidControlFrame error for unknown types.
func newControlFrame(frameType ControlFrameType) (controlFrame, error) {
	ctor, ok := cframeCtor[frameType]
	if !ok {
		return nil, &Error{Err: InvalidControlFrame}
	}
	return ctor(), nil
}

// cframeCtor maps each supported control frame type to a constructor for
// its concrete in-memory representation. The CREDENTIAL frame type is not
// implemented (see the TODO in types.go).
var cframeCtor = map[ControlFrameType]func() controlFrame{
	TypeSynStream:    func() controlFrame { return new(SynStreamFrame) },
	TypeSynReply:     func() controlFrame { return new(SynReplyFrame) },
	TypeRstStream:    func() controlFrame { return new(RstStreamFrame) },
	TypeSettings:     func() controlFrame { return new(SettingsFrame) },
	TypePing:         func() controlFrame { return new(PingFrame) },
	TypeGoAway:       func() controlFrame { return new(GoAwayFrame) },
	TypeHeaders:      func() controlFrame { return new(HeadersFrame) },
	TypeWindowUpdate: func() controlFrame { return new(WindowUpdateFrame) },
}
+
// uncorkHeaderDecompressor prepares the shared zlib header decompressor to
// read exactly payloadSize bytes of the current frame. The decompressor is
// created once, primed with the SPDY dictionary, and then reused for every
// subsequent header block (the zlib stream continues across frames); only
// the LimitedReader budget is reset per frame.
func (f *Framer) uncorkHeaderDecompressor(payloadSize int64) error {
	if f.headerDecompressor != nil {
		f.headerReader.N = payloadSize
		return nil
	}
	f.headerReader = io.LimitedReader{R: f.r, N: payloadSize}
	decompressor, err := zlib.NewReaderDict(&f.headerReader, []byte(headerDictionary))
	if err != nil {
		return err
	}
	f.headerDecompressor = decompressor
	return nil
}
+
// ReadFrame reads SPDY encoded data and returns a decompressed Frame.
// The high bit of the first word distinguishes control frames (version and
// frame type packed into the remaining 31 bits) from data frames (whose
// first word is the 31-bit stream id).
func (f *Framer) ReadFrame() (Frame, error) {
	var firstWord uint32
	if err := binary.Read(f.r, binary.BigEndian, &firstWord); err != nil {
		return nil, err
	}
	if firstWord&0x80000000 != 0 {
		frameType := ControlFrameType(firstWord & 0xffff)
		version := uint16(firstWord >> 16 & 0x7fff)
		return f.parseControlFrame(version, frameType)
	}
	return f.parseDataFrame(StreamId(firstWord & 0x7fffffff))
}
+
// parseControlFrame reads the second header word (flags in the top byte,
// 24-bit payload length below), constructs the concrete frame for
// frameType and lets the frame parse its own body.
func (f *Framer) parseControlFrame(version uint16, frameType ControlFrameType) (Frame, error) {
	var length uint32
	if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
		return nil, err
	}
	flags := ControlFlags((length & 0xff000000) >> 24)
	length &= 0xffffff
	header := ControlFrameHeader{version, frameType, flags, length}
	cframe, err := newControlFrame(frameType)
	if err != nil {
		return nil, err
	}
	if err = cframe.read(header, f); err != nil {
		return nil, err
	}
	return cframe, nil
}
+
// parseHeaderValueBlock decodes a SPDY name/value header block from r
// (typically the shared header decompressor) into an http.Header. Each
// entry is a length-prefixed name followed by a length-prefixed value;
// values containing NUL separators are split into multiple header values.
// Non-lowercase names are lowercased and duplicate names are merged, but
// either condition is reported as an error alongside the parsed block.
//
// NOTE(review): the 32-bit length prefixes are used unvalidated to size
// allocations, so a hostile peer can request very large buffers here —
// worth bounding against the frame length upstream.
func parseHeaderValueBlock(r io.Reader, streamId StreamId) (http.Header, error) {
	var numHeaders uint32
	if err := binary.Read(r, binary.BigEndian, &numHeaders); err != nil {
		return nil, err
	}
	var e error
	h := make(http.Header, int(numHeaders))
	for i := 0; i < int(numHeaders); i++ {
		var length uint32
		if err := binary.Read(r, binary.BigEndian, &length); err != nil {
			return nil, err
		}
		nameBytes := make([]byte, length)
		if _, err := io.ReadFull(r, nameBytes); err != nil {
			return nil, err
		}
		name := string(nameBytes)
		if name != strings.ToLower(name) {
			e = &Error{UnlowercasedHeaderName, streamId}
			name = strings.ToLower(name)
		}
		if h[name] != nil {
			e = &Error{DuplicateHeaders, streamId}
		}
		if err := binary.Read(r, binary.BigEndian, &length); err != nil {
			return nil, err
		}
		value := make([]byte, length)
		if _, err := io.ReadFull(r, value); err != nil {
			return nil, err
		}
		valueList := strings.Split(string(value), headerValueSeparator)
		for _, v := range valueList {
			h.Add(name, v)
		}
	}
	if e != nil {
		return h, e
	}
	return h, nil
}
+
// readSynStreamFrame parses a SYN_STREAM body: stream id, associated
// stream id, 3-bit priority (the top bits of one byte), credential slot,
// then a compressed header block occupying the remaining h.length-10
// bytes. Request-invalid headers and a zero stream id are rejected.
func (f *Framer) readSynStreamFrame(h ControlFrameHeader, frame *SynStreamFrame) error {
	frame.CFHeader = h
	var err error
	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	if err = binary.Read(f.r, binary.BigEndian, &frame.AssociatedToStreamId); err != nil {
		return err
	}
	if err = binary.Read(f.r, binary.BigEndian, &frame.Priority); err != nil {
		return err
	}
	frame.Priority >>= 5 // priority occupies the top 3 bits on the wire
	if err = binary.Read(f.r, binary.BigEndian, &frame.Slot); err != nil {
		return err
	}
	reader := f.r
	if !f.headerCompressionDisabled {
		err := f.uncorkHeaderDecompressor(int64(h.length - 10))
		if err != nil {
			return err
		}
		reader = f.headerDecompressor
	}
	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
	// The compressed payload must be consumed exactly: EOF with the budget
	// spent, or leftover budget after a successful parse, means the
	// declared frame length and the header block disagree.
	if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
		err = &Error{WrongCompressedPayloadSize, 0}
	}
	if err != nil {
		return err
	}
	for h := range frame.Headers {
		if invalidReqHeaders[h] {
			return &Error{InvalidHeaderPresent, frame.StreamId}
		}
	}
	if frame.StreamId == 0 {
		return &Error{ZeroStreamId, 0}
	}
	return nil
}
+
// readSynReplyFrame parses a SYN_REPLY body: stream id plus a compressed
// header block of h.length-4 bytes, which must be consumed exactly.
// Response-invalid headers and a zero stream id are rejected.
func (f *Framer) readSynReplyFrame(h ControlFrameHeader, frame *SynReplyFrame) error {
	frame.CFHeader = h
	var err error
	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	reader := f.r
	if !f.headerCompressionDisabled {
		err := f.uncorkHeaderDecompressor(int64(h.length - 4))
		if err != nil {
			return err
		}
		reader = f.headerDecompressor
	}
	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
	// EOF with the budget spent, or leftover budget, means the declared
	// frame length and the header block disagree.
	if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
		err = &Error{WrongCompressedPayloadSize, 0}
	}
	if err != nil {
		return err
	}
	for h := range frame.Headers {
		if invalidRespHeaders[h] {
			return &Error{InvalidHeaderPresent, frame.StreamId}
		}
	}
	if frame.StreamId == 0 {
		return &Error{ZeroStreamId, 0}
	}
	return nil
}
+
// readHeadersFrame parses a HEADERS body: stream id plus a compressed
// header block of h.length-4 bytes, consumed exactly like SYN_STREAM /
// SYN_REPLY header blocks.
func (f *Framer) readHeadersFrame(h ControlFrameHeader, frame *HeadersFrame) error {
	frame.CFHeader = h
	var err error
	if err = binary.Read(f.r, binary.BigEndian, &frame.StreamId); err != nil {
		return err
	}
	reader := f.r
	if !f.headerCompressionDisabled {
		err := f.uncorkHeaderDecompressor(int64(h.length - 4))
		if err != nil {
			return err
		}
		reader = f.headerDecompressor
	}
	frame.Headers, err = parseHeaderValueBlock(reader, frame.StreamId)
	if !f.headerCompressionDisabled && (err == io.EOF && f.headerReader.N == 0 || f.headerReader.N != 0) {
		err = &Error{WrongCompressedPayloadSize, 0}
	}
	if err != nil {
		return err
	}
	// Even stream ids are validated against request-invalid headers, odd
	// ids against response-invalid headers.
	// NOTE(review): SPDY clients open odd stream ids, so odd (client-
	// initiated) streams being treated as responses looks inverted —
	// this is vendored upstream logic; confirm against callers before
	// changing.
	var invalidHeaders map[string]bool
	if frame.StreamId%2 == 0 {
		invalidHeaders = invalidReqHeaders
	} else {
		invalidHeaders = invalidRespHeaders
	}
	for h := range frame.Headers {
		if invalidHeaders[h] {
			return &Error{InvalidHeaderPresent, frame.StreamId}
		}
	}
	if frame.StreamId == 0 {
		return &Error{ZeroStreamId, 0}
	}
	return nil
}
+
// parseDataFrame reads a data frame after its stream id word: flags live
// in the top byte of the second word and the payload length in its low 24
// bits, so payloads are bounded by MaxDataLength. The payload is consumed
// even when the stream id is zero, before that error is returned.
func (f *Framer) parseDataFrame(streamId StreamId) (*DataFrame, error) {
	var length uint32
	if err := binary.Read(f.r, binary.BigEndian, &length); err != nil {
		return nil, err
	}
	var frame DataFrame
	frame.StreamId = streamId
	frame.Flags = DataFlags(length >> 24)
	length &= 0xffffff
	frame.Data = make([]byte, length)
	if _, err := io.ReadFull(f.r, frame.Data); err != nil {
		return nil, err
	}
	if frame.StreamId == 0 {
		return nil, &Error{ZeroStreamId, 0}
	}
	return &frame, nil
}
diff --git a/vendor/github.com/docker/spdystream/spdy/types.go b/vendor/github.com/docker/spdystream/spdy/types.go
new file mode 100644
index 00000000000..7b6ee9c6f2b
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/types.go
@@ -0,0 +1,275 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package spdy implements the SPDY protocol (currently SPDY/3), described in
+// http://www.chromium.org/spdy/spdy-protocol/spdy-protocol-draft3.
+package spdy
+
+import (
+ "bytes"
+ "compress/zlib"
+ "io"
+ "net/http"
+)
+
// Version is the protocol version number that this package implements
// (SPDY/3).
const Version = 3
+
// ControlFrameType stores the type field in a control frame header.
type ControlFrameType uint16

// Control frame types defined by SPDY/3. Value 0x0005 is skipped, and the
// CREDENTIAL frame (0x000a) is not implemented. Each constant is given the
// explicit type: the original typed only the first one, leaving the rest
// as untyped integer constants.
const (
	TypeSynStream    ControlFrameType = 0x0001
	TypeSynReply     ControlFrameType = 0x0002
	TypeRstStream    ControlFrameType = 0x0003
	TypeSettings     ControlFrameType = 0x0004
	TypePing         ControlFrameType = 0x0006
	TypeGoAway       ControlFrameType = 0x0007
	TypeHeaders      ControlFrameType = 0x0008
	TypeWindowUpdate ControlFrameType = 0x0009
)
+
// ControlFlags are the flags that can be set on a control frame.
type ControlFlags uint8

// Control frame flags. Each constant is given the explicit type: the
// original typed only the first one. ControlFlagSettingsClearSettings
// shares the 0x01 bit with ControlFlagFin but applies only to SETTINGS.
const (
	ControlFlagFin                   ControlFlags = 0x01
	ControlFlagUnidirectional        ControlFlags = 0x02
	ControlFlagSettingsClearSettings ControlFlags = 0x01
)
+
// DataFlags are the flags that can be set on a data frame.
type DataFlags uint8

const (
	DataFlagFin DataFlags = 0x01
)

// MaxDataLength is the maximum number of bytes that can be stored in one
// frame, since the on-wire length field is 24 bits wide.
const MaxDataLength = 1<<24 - 1

// headerValueSeparator separates multiple header values within a single
// length-prefixed value on the wire.
const headerValueSeparator = "\x00"
+
// Frame is a single SPDY frame in its unpacked in-memory representation. Use
// Framer to read and write it.
type Frame interface {
	write(f *Framer) error
}

// ControlFrameHeader contains all the fields in a control frame header,
// in its unpacked in-memory representation.
type ControlFrameHeader struct {
	// Note, high bit is the "Control" bit.
	version   uint16 // spdy version number
	frameType ControlFrameType
	Flags     ControlFlags
	length    uint32 // length of data field
}

// controlFrame is a Frame that can also parse its own body from a Framer
// once the common control frame header has been read.
type controlFrame interface {
	Frame
	read(h ControlFrameHeader, f *Framer) error
}

// StreamId represents a 31-bit value identifying the stream.
type StreamId uint32
+
// SynStreamFrame is the unpacked, in-memory representation of a SYN_STREAM
// frame.
type SynStreamFrame struct {
	CFHeader             ControlFrameHeader
	StreamId             StreamId
	AssociatedToStreamId StreamId // stream id for a stream which this stream is associated to
	Priority             uint8    // priority of this frame (3-bit)
	Slot                 uint8    // index in the server's credential vector of the client certificate
	Headers              http.Header
}

// SynReplyFrame is the unpacked, in-memory representation of a SYN_REPLY frame.
type SynReplyFrame struct {
	CFHeader ControlFrameHeader
	StreamId StreamId
	Headers  http.Header
}

// RstStreamStatus represents the status that led to a RST_STREAM. Values
// start at 1 (ProtocolError); zero is not a valid status and is rejected
// when reading.
type RstStreamStatus uint32

const (
	ProtocolError RstStreamStatus = iota + 1
	InvalidStream
	RefusedStream
	UnsupportedVersion
	Cancel
	InternalError
	FlowControlError
	StreamInUse
	StreamAlreadyClosed
	InvalidCredentials
	FrameTooLarge
)

// RstStreamFrame is the unpacked, in-memory representation of a RST_STREAM
// frame.
type RstStreamFrame struct {
	CFHeader ControlFrameHeader
	StreamId StreamId
	Status   RstStreamStatus
}
+
// SettingsFlag represents a flag in a SETTINGS frame.
type SettingsFlag uint8

// SETTINGS entry flags. Each constant is given the explicit type: the
// original typed only the first one, leaving the second as an untyped
// integer constant.
const (
	FlagSettingsPersistValue SettingsFlag = 0x1
	FlagSettingsPersisted    SettingsFlag = 0x2
)
+
// SettingsId represents the id of an id/value pair in a SETTINGS frame.
type SettingsId uint32

const (
	SettingsUploadBandwidth SettingsId = iota + 1
	SettingsDownloadBandwidth
	SettingsRoundTripTime
	SettingsMaxConcurrentStreams
	SettingsCurrentCwnd
	SettingsDownloadRetransRate
	SettingsInitialWindowSize
	// NOTE(review): misspelling of "Certificate"; kept as-is because
	// renaming would break the exported API of this vendored package.
	SettingsClientCretificateVectorSize
)

// SettingsFlagIdValue is the unpacked, in-memory representation of the
// combined flag/id/value for a setting in a SETTINGS frame.
type SettingsFlagIdValue struct {
	Flag  SettingsFlag
	Id    SettingsId
	Value uint32
}

// SettingsFrame is the unpacked, in-memory representation of a SPDY
// SETTINGS frame.
type SettingsFrame struct {
	CFHeader     ControlFrameHeader
	FlagIdValues []SettingsFlagIdValue
}

// PingFrame is the unpacked, in-memory representation of a PING frame.
type PingFrame struct {
	CFHeader ControlFrameHeader
	Id       uint32 // unique id for this ping, from server is even, from client is odd.
}

// GoAwayStatus represents the status in a GoAwayFrame.
type GoAwayStatus uint32

const (
	GoAwayOK GoAwayStatus = iota
	GoAwayProtocolError
	GoAwayInternalError
)

// GoAwayFrame is the unpacked, in-memory representation of a GOAWAY frame.
type GoAwayFrame struct {
	CFHeader         ControlFrameHeader
	LastGoodStreamId StreamId // last stream id which was accepted by sender
	Status           GoAwayStatus
}

// HeadersFrame is the unpacked, in-memory representation of a HEADERS frame.
type HeadersFrame struct {
	CFHeader ControlFrameHeader
	StreamId StreamId
	Headers  http.Header
}

// WindowUpdateFrame is the unpacked, in-memory representation of a
// WINDOW_UPDATE frame.
type WindowUpdateFrame struct {
	CFHeader        ControlFrameHeader
	StreamId        StreamId
	DeltaWindowSize uint32 // additional number of bytes to existing window size
}

// TODO: Implement credential frame and related methods.

// DataFrame is the unpacked, in-memory representation of a DATA frame.
type DataFrame struct {
	// Note, high bit is the "Control" bit. Should be 0 for data frames.
	StreamId StreamId
	Flags    DataFlags
	Data     []byte // payload data of this frame
}
+
+// A SPDY specific error.
+type ErrorCode string
+
+const (
+ UnlowercasedHeaderName ErrorCode = "header was not lowercased"
+ DuplicateHeaders = "multiple headers with same name"
+ WrongCompressedPayloadSize = "compressed payload size was incorrect"
+ UnknownFrameType = "unknown frame type"
+ InvalidControlFrame = "invalid control frame"
+ InvalidDataFrame = "invalid data frame"
+ InvalidHeaderPresent = "frame contained invalid header"
+ ZeroStreamId = "stream id zero is disallowed"
+)
+
+// Error contains both the type of error and additional values. StreamId is 0
+// if Error is not associated with a stream.
+type Error struct {
+ Err ErrorCode
+ StreamId StreamId
+}
+
+func (e *Error) Error() string {
+ return string(e.Err)
+}
+
// invalidReqHeaders lists connection-level headers that must not appear in
// a SPDY request header block (enforced in readSynStreamFrame).
var invalidReqHeaders = map[string]bool{
	"Connection":        true,
	"Host":              true,
	"Keep-Alive":        true,
	"Proxy-Connection":  true,
	"Transfer-Encoding": true,
}

// invalidRespHeaders lists connection-level headers that must not appear
// in a SPDY response header block (enforced in readSynReplyFrame).
var invalidRespHeaders = map[string]bool{
	"Connection":        true,
	"Keep-Alive":        true,
	"Proxy-Connection":  true,
	"Transfer-Encoding": true,
}
+
// Framer handles serializing/deserializing SPDY frames, including compressing/
// decompressing payloads.
type Framer struct {
	headerCompressionDisabled bool
	w                         io.Writer
	headerBuf                 *bytes.Buffer // scratch buffer for compressed header blocks
	headerCompressor          *zlib.Writer
	r                         io.Reader
	headerReader              io.LimitedReader // bounds header block reads to the frame payload
	headerDecompressor        io.ReadCloser    // created lazily; reused across frames
}
+
// NewFramer allocates a new Framer for a given SPDY connection, represented by
// a io.Writer and io.Reader. Note that Framer will read and write individual fields
// from/to the Reader and Writer, so the caller should pass in an appropriately
// buffered implementation to optimize performance.
// The header compressor is created eagerly (best compression, primed with
// the SPDY dictionary); the matching decompressor is created lazily when
// the first compressed header block is read.
func NewFramer(w io.Writer, r io.Reader) (*Framer, error) {
	compressBuf := new(bytes.Buffer)
	compressor, err := zlib.NewWriterLevelDict(compressBuf, zlib.BestCompression, []byte(headerDictionary))
	if err != nil {
		return nil, err
	}
	framer := &Framer{
		w:                w,
		headerBuf:        compressBuf,
		headerCompressor: compressor,
		r:                r,
	}
	return framer, nil
}
diff --git a/vendor/github.com/docker/spdystream/spdy/write.go b/vendor/github.com/docker/spdystream/spdy/write.go
new file mode 100644
index 00000000000..b212f66a235
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/spdy/write.go
@@ -0,0 +1,318 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package spdy
+
+import (
+ "encoding/binary"
+ "io"
+ "net/http"
+ "strings"
+)
+
// write serializes a SYN_STREAM frame; it delegates to the Framer, which
// owns the shared header compressor state.
func (frame *SynStreamFrame) write(f *Framer) error {
	return f.writeSynStreamFrame(frame)
}

// write serializes a SYN_REPLY frame; it delegates to the Framer, which
// owns the shared header compressor state.
func (frame *SynReplyFrame) write(f *Framer) error {
	return f.writeSynReplyFrame(frame)
}
+
+func (frame *RstStreamFrame) write(f *Framer) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeRstStream
+ frame.CFHeader.Flags = 0
+ frame.CFHeader.length = 8
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if frame.Status == 0 {
+ return &Error{InvalidControlFrame, frame.StreamId}
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
+ return
+ }
+ return
+}
+
// write serializes a SETTINGS frame: a 4-byte entry count, then one
// 8-byte pair per entry with the flag packed into the top byte of the id
// word. Unlike the other writers, CFHeader.Flags is left as set by the
// caller (e.g. ControlFlagSettingsClearSettings).
func (frame *SettingsFrame) write(f *Framer) (err error) {
	frame.CFHeader.version = Version
	frame.CFHeader.frameType = TypeSettings
	frame.CFHeader.length = uint32(len(frame.FlagIdValues)*8 + 4)

	// Serialize frame to Writer.
	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
		return
	}
	if err = binary.Write(f.w, binary.BigEndian, uint32(len(frame.FlagIdValues))); err != nil {
		return
	}
	for _, flagIdValue := range frame.FlagIdValues {
		flagId := uint32(flagIdValue.Flag)<<24 | uint32(flagIdValue.Id)
		if err = binary.Write(f.w, binary.BigEndian, flagId); err != nil {
			return
		}
		if err = binary.Write(f.w, binary.BigEndian, flagIdValue.Value); err != nil {
			return
		}
	}
	return
}
+
// write serializes a PING frame (4-byte body: the ping id). A zero id is
// rejected before anything is written.
func (frame *PingFrame) write(f *Framer) (err error) {
	if frame.Id == 0 {
		return &Error{ZeroStreamId, 0}
	}
	frame.CFHeader.version = Version
	frame.CFHeader.frameType = TypePing
	frame.CFHeader.Flags = 0
	frame.CFHeader.length = 4

	// Serialize frame to Writer.
	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
		return
	}
	if err = binary.Write(f.w, binary.BigEndian, frame.Id); err != nil {
		return
	}
	return
}
+
// write serializes a GOAWAY frame (8-byte body: last-good stream id then
// status).
func (frame *GoAwayFrame) write(f *Framer) (err error) {
	frame.CFHeader.version = Version
	frame.CFHeader.frameType = TypeGoAway
	frame.CFHeader.Flags = 0
	frame.CFHeader.length = 8

	// Serialize frame to Writer.
	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
		return
	}
	if err = binary.Write(f.w, binary.BigEndian, frame.LastGoodStreamId); err != nil {
		return
	}
	if err = binary.Write(f.w, binary.BigEndian, frame.Status); err != nil {
		return
	}
	return nil
}
+
// write serializes a HEADERS frame; it delegates to the Framer, which owns
// the shared header compressor state.
func (frame *HeadersFrame) write(f *Framer) error {
	return f.writeHeadersFrame(frame)
}
+
// write serializes a WINDOW_UPDATE frame (8-byte body: stream id then
// delta window size).
func (frame *WindowUpdateFrame) write(f *Framer) (err error) {
	frame.CFHeader.version = Version
	frame.CFHeader.frameType = TypeWindowUpdate
	frame.CFHeader.Flags = 0
	frame.CFHeader.length = 8

	// Serialize frame to Writer.
	if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
		return
	}
	if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
		return
	}
	if err = binary.Write(f.w, binary.BigEndian, frame.DeltaWindowSize); err != nil {
		return
	}
	return nil
}
+
// write serializes a DATA frame; it delegates to the Framer.
func (frame *DataFrame) write(f *Framer) error {
	return f.writeDataFrame(frame)
}

// WriteFrame writes a frame by dispatching to the frame's own serializer.
func (f *Framer) WriteFrame(frame Frame) error {
	return frame.write(f)
}
+
// writeControlFrameHeader emits the 8-byte control frame header: the
// control bit ORed into the version word, the frame type, then the flags
// packed into the top byte of the 24-bit length word.
func writeControlFrameHeader(w io.Writer, h ControlFrameHeader) error {
	if err := binary.Write(w, binary.BigEndian, 0x8000|h.version); err != nil {
		return err
	}
	if err := binary.Write(w, binary.BigEndian, h.frameType); err != nil {
		return err
	}
	flagsAndLength := uint32(h.Flags)<<24 | h.length
	if err := binary.Write(w, binary.BigEndian, flagsAndLength); err != nil {
		return err
	}
	return nil
}
+
+func writeHeaderValueBlock(w io.Writer, h http.Header) (n int, err error) {
+ n = 0
+ if err = binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil {
+ return
+ }
+ n += 2
+ for name, values := range h {
+ if err = binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil {
+ return
+ }
+ n += 2
+ name = strings.ToLower(name)
+ if _, err = io.WriteString(w, name); err != nil {
+ return
+ }
+ n += len(name)
+ v := strings.Join(values, headerValueSeparator)
+ if err = binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil {
+ return
+ }
+ n += 2
+ if _, err = io.WriteString(w, v); err != nil {
+ return
+ }
+ n += len(v)
+ }
+ return
+}
+
+func (f *Framer) writeSynStreamFrame(frame *SynStreamFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSynStream
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 10)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.AssociatedToStreamId); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Priority<<5); err != nil {
+ return err
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.Slot); err != nil {
+ return err
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return err
+ }
+ f.headerBuf.Reset()
+ return nil
+}
+
+func (f *Framer) writeSynReplyFrame(frame *SynReplyFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeSynReply
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return
+ }
+ f.headerBuf.Reset()
+ return
+}
+
+func (f *Framer) writeHeadersFrame(frame *HeadersFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ // Marshal the headers.
+ var writer io.Writer = f.headerBuf
+ if !f.headerCompressionDisabled {
+ writer = f.headerCompressor
+ }
+ if _, err = writeHeaderValueBlock(writer, frame.Headers); err != nil {
+ return
+ }
+ if !f.headerCompressionDisabled {
+ f.headerCompressor.Flush()
+ }
+
+ // Set ControlFrameHeader.
+ frame.CFHeader.version = Version
+ frame.CFHeader.frameType = TypeHeaders
+ frame.CFHeader.length = uint32(len(f.headerBuf.Bytes()) + 4)
+
+ // Serialize frame to Writer.
+ if err = writeControlFrameHeader(f.w, frame.CFHeader); err != nil {
+ return
+ }
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ if _, err = f.w.Write(f.headerBuf.Bytes()); err != nil {
+ return
+ }
+ f.headerBuf.Reset()
+ return
+}
+
+func (f *Framer) writeDataFrame(frame *DataFrame) (err error) {
+ if frame.StreamId == 0 {
+ return &Error{ZeroStreamId, 0}
+ }
+ if frame.StreamId&0x80000000 != 0 || len(frame.Data) > MaxDataLength {
+ return &Error{InvalidDataFrame, frame.StreamId}
+ }
+
+ // Serialize frame to Writer.
+ if err = binary.Write(f.w, binary.BigEndian, frame.StreamId); err != nil {
+ return
+ }
+ flagsAndLength := uint32(frame.Flags)<<24 | uint32(len(frame.Data))
+ if err = binary.Write(f.w, binary.BigEndian, flagsAndLength); err != nil {
+ return
+ }
+ if _, err = f.w.Write(frame.Data); err != nil {
+ return
+ }
+ return nil
+}
diff --git a/vendor/github.com/docker/spdystream/stream.go b/vendor/github.com/docker/spdystream/stream.go
new file mode 100644
index 00000000000..f9e9ee267f8
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/stream.go
@@ -0,0 +1,327 @@
+package spdystream
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream/spdy"
+)
+
+var (
+ ErrUnreadPartialData = errors.New("unread partial data")
+)
+
+type Stream struct {
+ streamId spdy.StreamId
+ parent *Stream
+ conn *Connection
+ startChan chan error
+
+ dataLock sync.RWMutex
+ dataChan chan []byte
+ unread []byte
+
+ priority uint8
+ headers http.Header
+ headerChan chan http.Header
+ finishLock sync.Mutex
+ finished bool
+ replyCond *sync.Cond
+ replied bool
+ closeLock sync.Mutex
+ closeChan chan bool
+}
+
+// WriteData writes data to stream, sending a dataframe per call
+func (s *Stream) WriteData(data []byte, fin bool) error {
+ s.waitWriteReply()
+ var flags spdy.DataFlags
+
+ if fin {
+ flags = spdy.DataFlagFin
+ s.finishLock.Lock()
+ if s.finished {
+ s.finishLock.Unlock()
+ return ErrWriteClosedStream
+ }
+ s.finished = true
+ s.finishLock.Unlock()
+ }
+
+ dataFrame := &spdy.DataFrame{
+ StreamId: s.streamId,
+ Flags: flags,
+ Data: data,
+ }
+
+ debugMessage("(%p) (%d) Writing data frame", s, s.streamId)
+ return s.conn.framer.WriteFrame(dataFrame)
+}
+
+// Write writes bytes to a stream, calling write data for each call.
+func (s *Stream) Write(data []byte) (n int, err error) {
+ err = s.WriteData(data, false)
+ if err == nil {
+ n = len(data)
+ }
+ return
+}
+
+// Read reads bytes from a stream, a single read will never get more
+// than what is sent on a single data frame, but a multiple calls to
+// read may get data from the same data frame.
+func (s *Stream) Read(p []byte) (n int, err error) {
+ if s.unread == nil {
+ select {
+ case <-s.closeChan:
+ return 0, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return 0, io.EOF
+ }
+ s.unread = read
+ }
+ }
+ n = copy(p, s.unread)
+ if n < len(s.unread) {
+ s.unread = s.unread[n:]
+ } else {
+ s.unread = nil
+ }
+ return
+}
+
+// ReadData reads an entire data frame and returns the byte array
+// from the data frame. If there is unread data from the result
+// of a Read call, this function will return an ErrUnreadPartialData.
+func (s *Stream) ReadData() ([]byte, error) {
+ debugMessage("(%p) Reading data from %d", s, s.streamId)
+ if s.unread != nil {
+ return nil, ErrUnreadPartialData
+ }
+ select {
+ case <-s.closeChan:
+ return nil, io.EOF
+ case read, ok := <-s.dataChan:
+ if !ok {
+ return nil, io.EOF
+ }
+ return read, nil
+ }
+}
+
+func (s *Stream) waitWriteReply() {
+ if s.replyCond != nil {
+ s.replyCond.L.Lock()
+ for !s.replied {
+ s.replyCond.Wait()
+ }
+ s.replyCond.L.Unlock()
+ }
+}
+
+// Wait waits for the stream to receive a reply.
+func (s *Stream) Wait() error {
+ return s.WaitTimeout(time.Duration(0))
+}
+
+// WaitTimeout waits for the stream to receive a reply or for timeout.
+// When the timeout is reached, ErrTimeout will be returned.
+func (s *Stream) WaitTimeout(timeout time.Duration) error {
+ var timeoutChan <-chan time.Time
+ if timeout > time.Duration(0) {
+ timeoutChan = time.After(timeout)
+ }
+
+ select {
+ case err := <-s.startChan:
+ if err != nil {
+ return err
+ }
+ break
+ case <-timeoutChan:
+ return ErrTimeout
+ }
+ return nil
+}
+
+// Close closes the stream by sending an empty data frame with the
+// finish flag set, indicating this side is finished with the stream.
+func (s *Stream) Close() error {
+ select {
+ case <-s.closeChan:
+ // Stream is now fully closed
+ s.conn.removeStream(s)
+ default:
+ break
+ }
+ return s.WriteData([]byte{}, true)
+}
+
+// Reset sends a reset frame, putting the stream into the fully closed state.
+func (s *Stream) Reset() error {
+ s.conn.removeStream(s)
+ return s.resetStream()
+}
+
+func (s *Stream) resetStream() error {
+ // Always call closeRemoteChannels, even if s.finished is already true.
+ // This makes it so that stream.Close() followed by stream.Reset() allows
+ // stream.Read() to unblock.
+ s.closeRemoteChannels()
+
+ s.finishLock.Lock()
+ if s.finished {
+ s.finishLock.Unlock()
+ return nil
+ }
+ s.finished = true
+ s.finishLock.Unlock()
+
+ resetFrame := &spdy.RstStreamFrame{
+ StreamId: s.streamId,
+ Status: spdy.Cancel,
+ }
+ return s.conn.framer.WriteFrame(resetFrame)
+}
+
+// CreateSubStream creates a stream using the current as the parent
+func (s *Stream) CreateSubStream(headers http.Header, fin bool) (*Stream, error) {
+ return s.conn.CreateStream(headers, s, fin)
+}
+
+// SetPriority sets the stream priority, does not affect the
+// remote priority of this stream after Open has been called.
+// Valid values are 0 through 7, 0 being the highest priority
+// and 7 the lowest.
+func (s *Stream) SetPriority(priority uint8) {
+ s.priority = priority
+}
+
+// SendHeader sends a header frame across the stream
+func (s *Stream) SendHeader(headers http.Header, fin bool) error {
+ return s.conn.sendHeaders(headers, s, fin)
+}
+
+// SendReply sends a reply on a stream, only valid to be called once
+// when handling a new stream
+func (s *Stream) SendReply(headers http.Header, fin bool) error {
+ if s.replyCond == nil {
+ return errors.New("cannot reply on initiated stream")
+ }
+ s.replyCond.L.Lock()
+ defer s.replyCond.L.Unlock()
+ if s.replied {
+ return nil
+ }
+
+ err := s.conn.sendReply(headers, s, fin)
+ if err != nil {
+ return err
+ }
+
+ s.replied = true
+ s.replyCond.Broadcast()
+ return nil
+}
+
+// Refuse sends a reset frame with the status refuse, only
+// valid to be called once when handling a new stream. This
+// may be used to indicate that a stream is not allowed
+// when http status codes are not being used.
+func (s *Stream) Refuse() error {
+ if s.replied {
+ return nil
+ }
+ s.replied = true
+ return s.conn.sendReset(spdy.RefusedStream, s)
+}
+
+// Cancel sends a reset frame with the status canceled. This
+// can be used at any time by the creator of the Stream to
+// indicate the stream is no longer needed.
+func (s *Stream) Cancel() error {
+ return s.conn.sendReset(spdy.Cancel, s)
+}
+
+// ReceiveHeader receives a header sent on the other side
+// of the stream. This function will block until a header
+// is received or stream is closed.
+func (s *Stream) ReceiveHeader() (http.Header, error) {
+ select {
+ case <-s.closeChan:
+ break
+ case header, ok := <-s.headerChan:
+ if !ok {
+ return nil, fmt.Errorf("header chan closed")
+ }
+ return header, nil
+ }
+ return nil, fmt.Errorf("stream closed")
+}
+
+// Parent returns the parent stream
+func (s *Stream) Parent() *Stream {
+ return s.parent
+}
+
+// Headers returns the headers used to create the stream
+func (s *Stream) Headers() http.Header {
+ return s.headers
+}
+
+// String returns the string version of stream using the
+// streamId to uniquely identify the stream
+func (s *Stream) String() string {
+ return fmt.Sprintf("stream:%d", s.streamId)
+}
+
+// Identifier returns a 32 bit identifier for the stream
+func (s *Stream) Identifier() uint32 {
+ return uint32(s.streamId)
+}
+
+// IsFinished returns whether the stream has finished
+// sending data
+func (s *Stream) IsFinished() bool {
+ return s.finished
+}
+
+// Implement net.Conn interface
+
+func (s *Stream) LocalAddr() net.Addr {
+ return s.conn.conn.LocalAddr()
+}
+
+func (s *Stream) RemoteAddr() net.Addr {
+ return s.conn.conn.RemoteAddr()
+}
+
+// TODO set per stream values instead of connection-wide
+
+func (s *Stream) SetDeadline(t time.Time) error {
+ return s.conn.conn.SetDeadline(t)
+}
+
+func (s *Stream) SetReadDeadline(t time.Time) error {
+ return s.conn.conn.SetReadDeadline(t)
+}
+
+func (s *Stream) SetWriteDeadline(t time.Time) error {
+ return s.conn.conn.SetWriteDeadline(t)
+}
+
+func (s *Stream) closeRemoteChannels() {
+ s.closeLock.Lock()
+ defer s.closeLock.Unlock()
+ select {
+ case <-s.closeChan:
+ default:
+ close(s.closeChan)
+ }
+}
diff --git a/vendor/github.com/docker/spdystream/utils.go b/vendor/github.com/docker/spdystream/utils.go
new file mode 100644
index 00000000000..1b2c199a402
--- /dev/null
+++ b/vendor/github.com/docker/spdystream/utils.go
@@ -0,0 +1,16 @@
+package spdystream
+
+import (
+ "log"
+ "os"
+)
+
+var (
+ DEBUG = os.Getenv("DEBUG")
+)
+
+func debugMessage(fmt string, args ...interface{}) {
+ if DEBUG != "" {
+ log.Printf(fmt, args...)
+ }
+}
diff --git a/vendor/go.uber.org/automaxprocs/.codecov.yml b/vendor/go.uber.org/automaxprocs/.codecov.yml
deleted file mode 100644
index 9a2ed4a9969..00000000000
--- a/vendor/go.uber.org/automaxprocs/.codecov.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-coverage:
- range: 80..100
- round: down
- precision: 2
-
- status:
- project: # measuring the overall project coverage
- default: # context, you can create multiple ones with custom titles
- enabled: yes # must be yes|true to enable this status
- target: 90% # specify the target coverage for each commit status
- # option: "auto" (must increase from parent commit or pull request base)
- # option: "X%" a static target percentage to hit
- if_not_found: success # if parent is not found report status as success, error, or failure
- if_ci_failed: error # if ci fails report status as success, error, or failure
diff --git a/vendor/go.uber.org/automaxprocs/.gitignore b/vendor/go.uber.org/automaxprocs/.gitignore
deleted file mode 100644
index dd7bcf5130b..00000000000
--- a/vendor/go.uber.org/automaxprocs/.gitignore
+++ /dev/null
@@ -1,33 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-vendor
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-*.pprof
-*.out
-*.log
-coverage.txt
-
-/bin
-cover.out
-cover.html
diff --git a/vendor/go.uber.org/automaxprocs/.travis.yml b/vendor/go.uber.org/automaxprocs/.travis.yml
deleted file mode 100644
index 17974c73167..00000000000
--- a/vendor/go.uber.org/automaxprocs/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-language: go
-sudo: false
-go_import_path: go.uber.org/automaxprocs
-
-env:
- global:
- - GO111MODULE=on
-
-matrix:
- include:
- - go: "1.12.x"
- - go: "1.13.x"
- env: LINT=1
-
-install:
- - make install
-
-script:
- - test -z "$LINT" || make lint
- - make test
-
-after_success:
- - make cover
- - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/go.uber.org/automaxprocs/CHANGELOG.md b/vendor/go.uber.org/automaxprocs/CHANGELOG.md
deleted file mode 100644
index 8ebd7f144a8..00000000000
--- a/vendor/go.uber.org/automaxprocs/CHANGELOG.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Changelog
-
-## v1.3.0 (2020-01-23)
-
-- Migrate to Go modules.
-
-## v1.2.0 (2018-02-22)
-
-- Fixed quota clamping to always round down rather than up; Rather than
- guaranteeing constant throttling at saturation, instead assume that the
- fractional CPU was added as a hedge for factors outside of Go's scheduler.
-
-## v1.1.0 (2017-11-10)
-
-- Log the new value of `GOMAXPROCS` rather than the current value.
-- Make logs more explicit about whether `GOMAXPROCS` was modified or not.
-- Allow customization of the minimum `GOMAXPROCS`, and modify default from 2 to 1.
-
-## v1.0.0 (2017-08-09)
-
-- Initial release.
diff --git a/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md b/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md
deleted file mode 100644
index e327d9aa5cd..00000000000
--- a/vendor/go.uber.org/automaxprocs/CODE_OF_CONDUCT.md
+++ /dev/null
@@ -1,75 +0,0 @@
-# Contributor Covenant Code of Conduct
-
-## Our Pledge
-
-In the interest of fostering an open and welcoming environment, we as
-contributors and maintainers pledge to making participation in our project and
-our community a harassment-free experience for everyone, regardless of age,
-body size, disability, ethnicity, gender identity and expression, level of
-experience, nationality, personal appearance, race, religion, or sexual
-identity and orientation.
-
-## Our Standards
-
-Examples of behavior that contributes to creating a positive environment
-include:
-
-* Using welcoming and inclusive language
-* Being respectful of differing viewpoints and experiences
-* Gracefully accepting constructive criticism
-* Focusing on what is best for the community
-* Showing empathy towards other community members
-
-Examples of unacceptable behavior by participants include:
-
-* The use of sexualized language or imagery and unwelcome sexual attention or
- advances
-* Trolling, insulting/derogatory comments, and personal or political attacks
-* Public or private harassment
-* Publishing others' private information, such as a physical or electronic
- address, without explicit permission
-* Other conduct which could reasonably be considered inappropriate in a
- professional setting
-
-## Our Responsibilities
-
-Project maintainers are responsible for clarifying the standards of acceptable
-behavior and are expected to take appropriate and fair corrective action in
-response to any instances of unacceptable behavior.
-
-Project maintainers have the right and responsibility to remove, edit, or
-reject comments, commits, code, wiki edits, issues, and other contributions
-that are not aligned to this Code of Conduct, or to ban temporarily or
-permanently any contributor for other behaviors that they deem inappropriate,
-threatening, offensive, or harmful.
-
-## Scope
-
-This Code of Conduct applies both within project spaces and in public spaces
-when an individual is representing the project or its community. Examples of
-representing a project or community include using an official project e-mail
-address, posting via an official social media account, or acting as an
-appointed representative at an online or offline event. Representation of a
-project may be further defined and clarified by project maintainers.
-
-## Enforcement
-
-Instances of abusive, harassing, or otherwise unacceptable behavior may be
-reported by contacting the project team at oss-conduct@uber.com. The project
-team will review and investigate all complaints, and will respond in a way
-that it deems appropriate to the circumstances. The project team is obligated
-to maintain confidentiality with regard to the reporter of an incident.
-Further details of specific enforcement policies may be posted separately.
-
-Project maintainers who do not follow or enforce the Code of Conduct in good
-faith may face temporary or permanent repercussions as determined by other
-members of the project's leadership.
-
-## Attribution
-
-This Code of Conduct is adapted from the [Contributor Covenant][homepage],
-version 1.4, available at
-[http://contributor-covenant.org/version/1/4][version].
-
-[homepage]: http://contributor-covenant.org
-[version]: http://contributor-covenant.org/version/1/4/
diff --git a/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md b/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md
deleted file mode 100644
index 2b6a6040d78..00000000000
--- a/vendor/go.uber.org/automaxprocs/CONTRIBUTING.md
+++ /dev/null
@@ -1,81 +0,0 @@
-# Contributing
-
-We'd love your help improving this package!
-
-If you'd like to add new exported APIs, please [open an issue][open-issue]
-describing your proposal — discussing API changes ahead of time makes
-pull request review much smoother. In your issue, pull request, and any other
-communications, please remember to treat your fellow contributors with
-respect! We take our [code of conduct](CODE_OF_CONDUCT.md) seriously.
-
-Note that you'll need to sign [Uber's Contributor License Agreement][cla]
-before we can accept any of your contributions. If necessary, a bot will remind
-you to accept the CLA when you open your pull request.
-
-## Setup
-
-[Fork][fork], then clone the repository:
-
-```
-mkdir -p $GOPATH/src/go.uber.org
-cd $GOPATH/src/go.uber.org
-git clone git@github.com:your_github_username/automaxprocs.git
-cd automaxprocs
-git remote add upstream https://github.com/uber-go/automaxprocs.git
-git fetch upstream
-```
-
-Install the test dependencies:
-
-```
-make dependencies
-```
-
-Make sure that the tests and the linters pass:
-
-```
-make test
-make lint
-```
-
-If you're not using the minor version of Go specified in the Makefile's
-`LINTABLE_MINOR_VERSIONS` variable, `make lint` doesn't do anything. This is
-fine, but it means that you'll only discover lint failures after you open your
-pull request.
-
-## Making Changes
-
-Start by creating a new branch for your changes:
-
-```
-cd $GOPATH/src/go.uber.org/automaxprocs
-git checkout master
-git fetch upstream
-git rebase upstream/master
-git checkout -b cool_new_feature
-```
-
-Make your changes, then ensure that `make lint` and `make test` still pass. If
-you're satisfied with your changes, push them to your fork.
-
-```
-git push origin cool_new_feature
-```
-
-Then use the GitHub UI to open a pull request.
-
-At this point, you're waiting on us to review your changes. We *try* to respond
-to issues and pull requests within a few business days, and we may suggest some
-improvements or alternatives. Once your changes are approved, one of the
-project maintainers will merge them.
-
-We're much more likely to approve your changes if you:
-
-* Add tests for new functionality.
-* Write a [good commit message][commit-message].
-* Maintain backward compatibility.
-
-[fork]: https://github.com/uber-go/automaxprocs/fork
-[open-issue]: https://github.com/uber-go/automaxprocs/issues/new
-[cla]: https://cla-assistant.io/uber-go/automaxprocs
-[commit-message]: http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html
diff --git a/vendor/go.uber.org/automaxprocs/LICENSE b/vendor/go.uber.org/automaxprocs/LICENSE
deleted file mode 100644
index 20dcf51d96d..00000000000
--- a/vendor/go.uber.org/automaxprocs/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2017 Uber Technologies, Inc.
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-THE SOFTWARE.
\ No newline at end of file
diff --git a/vendor/go.uber.org/automaxprocs/Makefile b/vendor/go.uber.org/automaxprocs/Makefile
deleted file mode 100644
index 47967410849..00000000000
--- a/vendor/go.uber.org/automaxprocs/Makefile
+++ /dev/null
@@ -1,46 +0,0 @@
-export GOBIN ?= $(shell pwd)/bin
-
-GO_FILES := $(shell \
- find . '(' -path '*/.*' -o -path './vendor' ')' -prune \
- -o -name '*.go' -print | cut -b3-)
-
-GOLINT = $(GOBIN)/golint
-STATICCHECK = $(GOBIN)/staticcheck
-
-.PHONY: build
-build:
- go build ./...
-
-.PHONY: install
-install:
- go mod download
-
-.PHONY: test
-test:
- go test -race ./...
-
-.PHONY: cover
-cover:
- go test -coverprofile=cover.out -covermode=atomic -coverpkg=./... ./...
- go tool cover -html=cover.out -o cover.html
-
-$(GOLINT):
- go install golang.org/x/lint/golint
-
-$(STATICCHECK):
- go install honnef.co/go/tools/cmd/staticcheck
-
-.PHONY: lint
-lint: $(GOLINT) $(STATICCHECK)
- @rm -rf lint.log
- @echo "Checking gofmt"
- @gofmt -d -s $(GO_FILES) 2>&1 | tee lint.log
- @echo "Checking go vet"
- @go vet ./... 2>&1 | tee -a lint.log
- @echo "Checking golint"
- @$(GOLINT) ./... | tee -a lint.log
- @echo "Checking staticcheck"
- @$(STATICCHECK) ./... 2>&1 | tee -a lint.log
- @echo "Checking for license headers..."
- @./.build/check_license.sh | tee -a lint.log
- @[ ! -s lint.log ]
diff --git a/vendor/go.uber.org/automaxprocs/README.md b/vendor/go.uber.org/automaxprocs/README.md
deleted file mode 100644
index 7ad608c6290..00000000000
--- a/vendor/go.uber.org/automaxprocs/README.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# automaxprocs [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov]
-
-Automatically set `GOMAXPROCS` to match Linux container CPU quota.
-
-## Installation
-
-`go get -u go.uber.org/automaxprocs`
-
-## Quick Start
-
-```go
-import _ "go.uber.org/automaxprocs"
-
-func main() {
- // Your application logic here.
-}
-```
-
-## Development Status: Stable
-
-All APIs are finalized, and no breaking changes will be made in the 1.x series
-of releases. Users of semver-aware dependency management systems should pin
-automaxprocs to `^1`.
-
-## Contributing
-
-We encourage and support an active, healthy community of contributors —
-including you! Details are in the [contribution guide](CONTRIBUTING.md) and
-the [code of conduct](CODE_OF_CONDUCT.md). The automaxprocs maintainers keep
-an eye on issues and pull requests, but you can also report any negative
-conduct to oss-conduct@uber.com. That email list is a private, safe space;
-even the automaxprocs maintainers don't have access, so don't hesitate to hold
-us to a high standard.
-
-
-
-Released under the [MIT License](LICENSE).
-
-[doc-img]: https://godoc.org/go.uber.org/automaxprocs?status.svg
-[doc]: https://godoc.org/go.uber.org/automaxprocs
-[ci-img]: https://travis-ci.com/uber-go/automaxprocs.svg?branch=master
-[ci]: https://travis-ci.com/uber-go/automaxprocs
-[cov-img]: https://codecov.io/gh/uber-go/automaxprocs/branch/master/graph/badge.svg
-[cov]: https://codecov.io/gh/uber-go/automaxprocs
-
-
diff --git a/vendor/go.uber.org/automaxprocs/automaxprocs.go b/vendor/go.uber.org/automaxprocs/automaxprocs.go
deleted file mode 100644
index 69946a3e1fd..00000000000
--- a/vendor/go.uber.org/automaxprocs/automaxprocs.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package automaxprocs automatically sets GOMAXPROCS to match the Linux
-// container CPU quota, if any.
-package automaxprocs // import "go.uber.org/automaxprocs"
-
-import (
- "log"
-
- "go.uber.org/automaxprocs/maxprocs"
-)
-
-func init() {
- maxprocs.Set(maxprocs.Logger(log.Printf))
-}
diff --git a/vendor/go.uber.org/automaxprocs/glide.yaml b/vendor/go.uber.org/automaxprocs/glide.yaml
deleted file mode 100644
index d49aa7abf4d..00000000000
--- a/vendor/go.uber.org/automaxprocs/glide.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
-package: go.uber.org/automaxprocs
-import: []
-testImport:
-- package: github.com/stretchr/testify
- version: ^1.1.4
- subpackages:
- - assert
diff --git a/vendor/go.uber.org/automaxprocs/go.mod b/vendor/go.uber.org/automaxprocs/go.mod
deleted file mode 100644
index 845c04e56de..00000000000
--- a/vendor/go.uber.org/automaxprocs/go.mod
+++ /dev/null
@@ -1,9 +0,0 @@
-module go.uber.org/automaxprocs
-
-go 1.13
-
-require (
- github.com/stretchr/testify v1.4.0
- golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f
- honnef.co/go/tools v0.0.1-2019.2.3
-)
diff --git a/vendor/go.uber.org/automaxprocs/go.sum b/vendor/go.uber.org/automaxprocs/go.sum
deleted file mode 100644
index c3530e462dc..00000000000
--- a/vendor/go.uber.org/automaxprocs/go.sum
+++ /dev/null
@@ -1,42 +0,0 @@
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
-github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
-github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
-github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
-github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
-github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
-github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
-github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
-github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
-github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
-github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
-github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
-golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
-golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f h1:J5lckAjkw6qYlOZNj90mLYNTEKDvWeuc1yieZ8qUzUE=
-golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
-golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
-golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
-golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
-golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
-golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
-golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f h1:kDxGY2VmgABOe55qheT/TFqUMtcTHnomIPS1iv3G4Ms=
-golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
-gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
-gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
-gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw=
-gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM=
-honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
deleted file mode 100644
index 1257d0c92b6..00000000000
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroup.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build linux
-
-package cgroups
-
-import (
- "bufio"
- "io"
- "os"
- "path/filepath"
- "strconv"
-)
-
-// CGroup represents the data structure for a Linux control group.
-type CGroup struct {
- path string
-}
-
-// NewCGroup returns a new *CGroup from a given path.
-func NewCGroup(path string) *CGroup {
- return &CGroup{path: path}
-}
-
-// Path returns the path of the CGroup*.
-func (cg *CGroup) Path() string {
- return cg.path
-}
-
-// ParamPath returns the path of the given cgroup param under itself.
-func (cg *CGroup) ParamPath(param string) string {
- return filepath.Join(cg.path, param)
-}
-
-// readFirstLine reads the first line from a cgroup param file.
-func (cg *CGroup) readFirstLine(param string) (string, error) {
- paramFile, err := os.Open(cg.ParamPath(param))
- if err != nil {
- return "", err
- }
- defer paramFile.Close()
-
- scanner := bufio.NewScanner(paramFile)
- if scanner.Scan() {
- return scanner.Text(), nil
- }
- if err := scanner.Err(); err != nil {
- return "", err
- }
- return "", io.ErrUnexpectedEOF
-}
-
-// readInt parses the first line from a cgroup param file as int.
-func (cg *CGroup) readInt(param string) (int, error) {
- text, err := cg.readFirstLine(param)
- if err != nil {
- return 0, err
- }
- return strconv.Atoi(text)
-}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
deleted file mode 100644
index e2489b826ee..00000000000
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/cgroups.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build linux
-
-package cgroups
-
-const (
- // _cgroupFSType is the Linux CGroup file system type used in
- // `/proc/$PID/mountinfo`.
- _cgroupFSType = "cgroup"
- // _cgroupSubsysCPU is the CPU CGroup subsystem.
- _cgroupSubsysCPU = "cpu"
- // _cgroupSubsysCPUAcct is the CPU accounting CGroup subsystem.
- _cgroupSubsysCPUAcct = "cpuacct"
- // _cgroupSubsysCPUSet is the CPUSet CGroup subsystem.
- _cgroupSubsysCPUSet = "cpuset"
- // _cgroupSubsysMemory is the Memory CGroup subsystem.
- _cgroupSubsysMemory = "memory"
-
- // _cgroupCPUCFSQuotaUsParam is the file name for the CGroup CFS quota
- // parameter.
- _cgroupCPUCFSQuotaUsParam = "cpu.cfs_quota_us"
- // _cgroupCPUCFSPeriodUsParam is the file name for the CGroup CFS period
- // parameter.
- _cgroupCPUCFSPeriodUsParam = "cpu.cfs_period_us"
-)
-
-const (
- _procPathCGroup = "/proc/self/cgroup"
- _procPathMountInfo = "/proc/self/mountinfo"
-)
-
-// CGroups is a map that associates each CGroup with its subsystem name.
-type CGroups map[string]*CGroup
-
-// NewCGroups returns a new *CGroups from given `mountinfo` and `cgroup` files
-// under for some process under `/proc` file system (see also proc(5) for more
-// information).
-func NewCGroups(procPathMountInfo, procPathCGroup string) (CGroups, error) {
- cgroupSubsystems, err := parseCGroupSubsystems(procPathCGroup)
- if err != nil {
- return nil, err
- }
-
- cgroups := make(CGroups)
- newMountPoint := func(mp *MountPoint) error {
- if mp.FSType != _cgroupFSType {
- return nil
- }
-
- for _, opt := range mp.SuperOptions {
- subsys, exists := cgroupSubsystems[opt]
- if !exists {
- continue
- }
-
- cgroupPath, err := mp.Translate(subsys.Name)
- if err != nil {
- return err
- }
- cgroups[opt] = NewCGroup(cgroupPath)
- }
-
- return nil
- }
-
- if err := parseMountInfo(procPathMountInfo, newMountPoint); err != nil {
- return nil, err
- }
- return cgroups, nil
-}
-
-// NewCGroupsForCurrentProcess returns a new *CGroups instance for the current
-// process.
-func NewCGroupsForCurrentProcess() (CGroups, error) {
- return NewCGroups(_procPathMountInfo, _procPathCGroup)
-}
-
-// CPUQuota returns the CPU quota applied with the CPU cgroup controller.
-// It is a result of `cpu.cfs_quota_us / cpu.cfs_period_us`. If the value of
-// `cpu.cfs_quota_us` was not set (-1), the method returns `(-1, nil)`.
-func (cg CGroups) CPUQuota() (float64, bool, error) {
- cpuCGroup, exists := cg[_cgroupSubsysCPU]
- if !exists {
- return -1, false, nil
- }
-
- cfsQuotaUs, err := cpuCGroup.readInt(_cgroupCPUCFSQuotaUsParam)
- if defined := cfsQuotaUs > 0; err != nil || !defined {
- return -1, defined, err
- }
-
- cfsPeriodUs, err := cpuCGroup.readInt(_cgroupCPUCFSPeriodUsParam)
- if err != nil {
- return -1, false, err
- }
-
- return float64(cfsQuotaUs) / float64(cfsPeriodUs), true, nil
-}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go
deleted file mode 100644
index 113555f63da..00000000000
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/doc.go
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package cgroups provides utilities to access Linux control group (CGroups)
-// parameters (CPU quota, for example) for a given process.
-package cgroups
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go
deleted file mode 100644
index bad8d7ae88d..00000000000
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/errors.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build linux
-
-package cgroups
-
-import "fmt"
-
-type cgroupSubsysFormatInvalidError struct {
- line string
-}
-
-type mountPointFormatInvalidError struct {
- line string
-}
-
-type pathNotExposedFromMountPointError struct {
- mountPoint string
- root string
- path string
-}
-
-func (err cgroupSubsysFormatInvalidError) Error() string {
- return fmt.Sprintf("invalid format for CGroupSubsys: %q", err.line)
-}
-
-func (err mountPointFormatInvalidError) Error() string {
- return fmt.Sprintf("invalid format for MountPoint: %q", err.line)
-}
-
-func (err pathNotExposedFromMountPointError) Error() string {
- return fmt.Sprintf("path %q is not a descendant of mount point root %q and cannot be exposed from %q", err.path, err.root, err.mountPoint)
-}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go
deleted file mode 100644
index d6238d5cf78..00000000000
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/mountpoint.go
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build linux
-
-package cgroups
-
-import (
- "bufio"
- "os"
- "path/filepath"
- "strconv"
- "strings"
-)
-
-const (
- _mountInfoSep = " "
- _mountInfoOptsSep = ","
- _mountInfoOptionalFieldsSep = "-"
-)
-
-const (
- _miFieldIDMountID = iota
- _miFieldIDParentID
- _miFieldIDDeviceID
- _miFieldIDRoot
- _miFieldIDMountPoint
- _miFieldIDOptions
- _miFieldIDOptionalFields
-
- _miFieldCountFirstHalf
-)
-
-const (
- _miFieldOffsetFSType = iota
- _miFieldOffsetMountSource
- _miFieldOffsetSuperOptions
-
- _miFieldCountSecondHalf
-)
-
-const _miFieldCountMin = _miFieldCountFirstHalf + _miFieldCountSecondHalf
-
-// MountPoint is the data structure for the mount points in
-// `/proc/$PID/mountinfo`. See also proc(5) for more information.
-type MountPoint struct {
- MountID int
- ParentID int
- DeviceID string
- Root string
- MountPoint string
- Options []string
- OptionalFields []string
- FSType string
- MountSource string
- SuperOptions []string
-}
-
-// NewMountPointFromLine parses a line read from `/proc/$PID/mountinfo` and
-// returns a new *MountPoint.
-func NewMountPointFromLine(line string) (*MountPoint, error) {
- fields := strings.Split(line, _mountInfoSep)
-
- if len(fields) < _miFieldCountMin {
- return nil, mountPointFormatInvalidError{line}
- }
-
- mountID, err := strconv.Atoi(fields[_miFieldIDMountID])
- if err != nil {
- return nil, err
- }
-
- parentID, err := strconv.Atoi(fields[_miFieldIDParentID])
- if err != nil {
- return nil, err
- }
-
- for i, field := range fields[_miFieldIDOptionalFields:] {
- if field == _mountInfoOptionalFieldsSep {
- fsTypeStart := _miFieldIDOptionalFields + i + 1
-
- if len(fields) != fsTypeStart+_miFieldCountSecondHalf {
- return nil, mountPointFormatInvalidError{line}
- }
-
- miFieldIDFSType := _miFieldOffsetFSType + fsTypeStart
- miFieldIDMountSource := _miFieldOffsetMountSource + fsTypeStart
- miFieldIDSuperOptions := _miFieldOffsetSuperOptions + fsTypeStart
-
- return &MountPoint{
- MountID: mountID,
- ParentID: parentID,
- DeviceID: fields[_miFieldIDDeviceID],
- Root: fields[_miFieldIDRoot],
- MountPoint: fields[_miFieldIDMountPoint],
- Options: strings.Split(fields[_miFieldIDOptions], _mountInfoOptsSep),
- OptionalFields: fields[_miFieldIDOptionalFields:(fsTypeStart - 1)],
- FSType: fields[miFieldIDFSType],
- MountSource: fields[miFieldIDMountSource],
- SuperOptions: strings.Split(fields[miFieldIDSuperOptions], _mountInfoOptsSep),
- }, nil
- }
- }
-
- return nil, mountPointFormatInvalidError{line}
-}
-
-// Translate converts an absolute path inside the *MountPoint's file system to
-// the host file system path in the mount namespace the *MountPoint belongs to.
-func (mp *MountPoint) Translate(absPath string) (string, error) {
- relPath, err := filepath.Rel(mp.Root, absPath)
-
- if err != nil {
- return "", err
- }
- if relPath == ".." || strings.HasPrefix(relPath, "../") {
- return "", pathNotExposedFromMountPointError{
- mountPoint: mp.MountPoint,
- root: mp.Root,
- path: absPath,
- }
- }
-
- return filepath.Join(mp.MountPoint, relPath), nil
-}
-
-// parseMountInfo parses procPathMountInfo (usually at `/proc/$PID/mountinfo`)
-// and yields parsed *MountPoint into newMountPoint.
-func parseMountInfo(procPathMountInfo string, newMountPoint func(*MountPoint) error) error {
- mountInfoFile, err := os.Open(procPathMountInfo)
- if err != nil {
- return err
- }
- defer mountInfoFile.Close()
-
- scanner := bufio.NewScanner(mountInfoFile)
-
- for scanner.Scan() {
- mountPoint, err := NewMountPointFromLine(scanner.Text())
- if err != nil {
- return err
- }
- if err := newMountPoint(mountPoint); err != nil {
- return err
- }
- }
-
- return scanner.Err()
-}
diff --git a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go b/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go
deleted file mode 100644
index 6dc1b6453cb..00000000000
--- a/vendor/go.uber.org/automaxprocs/internal/cgroups/subsys.go
+++ /dev/null
@@ -1,102 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build linux
-
-package cgroups
-
-import (
- "bufio"
- "os"
- "strconv"
- "strings"
-)
-
-const (
- _cgroupSep = ":"
- _cgroupSubsysSep = ","
-)
-
-const (
- _csFieldIDID = iota
- _csFieldIDSubsystems
- _csFieldIDName
- _csFieldCount
-)
-
-// CGroupSubsys represents the data structure for entities in
-// `/proc/$PID/cgroup`. See also proc(5) for more information.
-type CGroupSubsys struct {
- ID int
- Subsystems []string
- Name string
-}
-
-// NewCGroupSubsysFromLine returns a new *CGroupSubsys by parsing a string in
-// the format of `/proc/$PID/cgroup`
-func NewCGroupSubsysFromLine(line string) (*CGroupSubsys, error) {
- fields := strings.Split(line, _cgroupSep)
-
- if len(fields) != _csFieldCount {
- return nil, cgroupSubsysFormatInvalidError{line}
- }
-
- id, err := strconv.Atoi(fields[_csFieldIDID])
- if err != nil {
- return nil, err
- }
-
- cgroup := &CGroupSubsys{
- ID: id,
- Subsystems: strings.Split(fields[_csFieldIDSubsystems], _cgroupSubsysSep),
- Name: fields[_csFieldIDName],
- }
-
- return cgroup, nil
-}
-
-// parseCGroupSubsystems parses procPathCGroup (usually at `/proc/$PID/cgroup`)
-// and returns a new map[string]*CGroupSubsys.
-func parseCGroupSubsystems(procPathCGroup string) (map[string]*CGroupSubsys, error) {
- cgroupFile, err := os.Open(procPathCGroup)
- if err != nil {
- return nil, err
- }
- defer cgroupFile.Close()
-
- scanner := bufio.NewScanner(cgroupFile)
- subsystems := make(map[string]*CGroupSubsys)
-
- for scanner.Scan() {
- cgroup, err := NewCGroupSubsysFromLine(scanner.Text())
- if err != nil {
- return nil, err
- }
- for _, subsys := range cgroup.Subsystems {
- subsystems[subsys] = cgroup
- }
- }
-
- if err := scanner.Err(); err != nil {
- return nil, err
- }
-
- return subsystems, nil
-}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
deleted file mode 100644
index 37699c31d3e..00000000000
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_linux.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build linux
-
-package runtime
-
-import (
- "math"
-
- cg "go.uber.org/automaxprocs/internal/cgroups"
-)
-
-// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
-// to a valid GOMAXPROCS value.
-func CPUQuotaToGOMAXPROCS(minValue int) (int, CPUQuotaStatus, error) {
- cgroups, err := cg.NewCGroupsForCurrentProcess()
- if err != nil {
- return -1, CPUQuotaUndefined, err
- }
-
- quota, defined, err := cgroups.CPUQuota()
- if !defined || err != nil {
- return -1, CPUQuotaUndefined, err
- }
-
- maxProcs := int(math.Floor(quota))
- if minValue > 0 && maxProcs < minValue {
- return minValue, CPUQuotaMinUsed, nil
- }
- return maxProcs, CPUQuotaUsed, nil
-}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go b/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
deleted file mode 100644
index 5915a2efbfb..00000000000
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/cpu_quota_unsupported.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build !linux
-
-package runtime
-
-// CPUQuotaToGOMAXPROCS converts the CPU quota applied to the calling process
-// to a valid GOMAXPROCS value. This is Linux-specific and not supported in the
-// current OS.
-func CPUQuotaToGOMAXPROCS(_ int) (int, CPUQuotaStatus, error) {
- return -1, CPUQuotaUndefined, nil
-}
diff --git a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go b/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
deleted file mode 100644
index a0b078147ca..00000000000
--- a/vendor/go.uber.org/automaxprocs/internal/runtime/runtime.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package runtime
-
-// CPUQuotaStatus presents the status of how CPU quota is used
-type CPUQuotaStatus int
-
-const (
- // CPUQuotaUndefined is returned when CPU quota is undefined
- CPUQuotaUndefined CPUQuotaStatus = iota
- // CPUQuotaUsed is returned when a valid CPU quota can be used
- CPUQuotaUsed
- // CPUQuotaMinUsed is return when CPU quota is larger than the min value
- CPUQuotaMinUsed
-)
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go b/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
deleted file mode 100644
index ec032438fd7..00000000000
--- a/vendor/go.uber.org/automaxprocs/maxprocs/maxprocs.go
+++ /dev/null
@@ -1,130 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// Package maxprocs lets Go programs easily configure runtime.GOMAXPROCS to
-// match the configured Linux CPU quota. Unlike the top-level automaxprocs
-// package, it lets the caller configure logging and handle errors.
-package maxprocs // import "go.uber.org/automaxprocs/maxprocs"
-
-import (
- "os"
- "runtime"
-
- iruntime "go.uber.org/automaxprocs/internal/runtime"
-)
-
-const _maxProcsKey = "GOMAXPROCS"
-
-func currentMaxProcs() int {
- return runtime.GOMAXPROCS(0)
-}
-
-type config struct {
- printf func(string, ...interface{})
- procs func(int) (int, iruntime.CPUQuotaStatus, error)
- minGOMAXPROCS int
-}
-
-func (c *config) log(fmt string, args ...interface{}) {
- if c.printf != nil {
- c.printf(fmt, args...)
- }
-}
-
-// An Option alters the behavior of Set.
-type Option interface {
- apply(*config)
-}
-
-// Logger uses the supplied printf implementation for log output. By default,
-// Set doesn't log anything.
-func Logger(printf func(string, ...interface{})) Option {
- return optionFunc(func(cfg *config) {
- cfg.printf = printf
- })
-}
-
-// Min sets the minimum GOMAXPROCS value that will be used.
-// Any value below 1 is ignored.
-func Min(n int) Option {
- return optionFunc(func(cfg *config) {
- if n >= 1 {
- cfg.minGOMAXPROCS = n
- }
- })
-}
-
-type optionFunc func(*config)
-
-func (of optionFunc) apply(cfg *config) { of(cfg) }
-
-// Set GOMAXPROCS to match the Linux container CPU quota (if any), returning
-// any error encountered and an undo function.
-//
-// Set is a no-op on non-Linux systems and in Linux environments without a
-// configured CPU quota.
-func Set(opts ...Option) (func(), error) {
- cfg := &config{
- procs: iruntime.CPUQuotaToGOMAXPROCS,
- minGOMAXPROCS: 1,
- }
- for _, o := range opts {
- o.apply(cfg)
- }
-
- undoNoop := func() {
- cfg.log("maxprocs: No GOMAXPROCS change to reset")
- }
-
- // Honor the GOMAXPROCS environment variable if present. Otherwise, amend
- // `runtime.GOMAXPROCS()` with the current process' CPU quota if the OS is
- // Linux, and guarantee a minimum value of 1. The minimum guaranteed value
- // can be overriden using `maxprocs.Min()`.
- if max, exists := os.LookupEnv(_maxProcsKey); exists {
- cfg.log("maxprocs: Honoring GOMAXPROCS=%q as set in environment", max)
- return undoNoop, nil
- }
-
- maxProcs, status, err := cfg.procs(cfg.minGOMAXPROCS)
- if err != nil {
- return undoNoop, err
- }
-
- if status == iruntime.CPUQuotaUndefined {
- cfg.log("maxprocs: Leaving GOMAXPROCS=%v: CPU quota undefined", currentMaxProcs())
- return undoNoop, nil
- }
-
- prev := currentMaxProcs()
- undo := func() {
- cfg.log("maxprocs: Resetting GOMAXPROCS to %v", prev)
- runtime.GOMAXPROCS(prev)
- }
-
- switch status {
- case iruntime.CPUQuotaMinUsed:
- cfg.log("maxprocs: Updating GOMAXPROCS=%v: using minimum allowed GOMAXPROCS", maxProcs)
- case iruntime.CPUQuotaUsed:
- cfg.log("maxprocs: Updating GOMAXPROCS=%v: determined from CPU quota", maxProcs)
- }
-
- runtime.GOMAXPROCS(maxProcs)
- return undo, nil
-}
diff --git a/vendor/go.uber.org/automaxprocs/maxprocs/version.go b/vendor/go.uber.org/automaxprocs/maxprocs/version.go
deleted file mode 100644
index 0161a67cd90..00000000000
--- a/vendor/go.uber.org/automaxprocs/maxprocs/version.go
+++ /dev/null
@@ -1,24 +0,0 @@
-// Copyright (c) 2017 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package maxprocs
-
-// Version is the current package version.
-const Version = "1.3.0"
diff --git a/vendor/go.uber.org/automaxprocs/tools.go b/vendor/go.uber.org/automaxprocs/tools.go
deleted file mode 100644
index 5759fa365a2..00000000000
--- a/vendor/go.uber.org/automaxprocs/tools.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright (c) 2020 Uber Technologies, Inc.
-//
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-// +build tools
-
-package automaxprocs
-
-import (
- // Tools we use during development.
- _ "golang.org/x/lint/golint"
- _ "honnef.co/go/tools/cmd/staticcheck"
-)
diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go
deleted file mode 100644
index 8c9977355c9..00000000000
--- a/vendor/golang.org/x/tools/go/analysis/analysis.go
+++ /dev/null
@@ -1,237 +0,0 @@
-package analysis
-
-import (
- "flag"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "reflect"
-
- "golang.org/x/tools/internal/analysisinternal"
-)
-
-// An Analyzer describes an analysis function and its options.
-type Analyzer struct {
- // The Name of the analyzer must be a valid Go identifier
- // as it may appear in command-line flags, URLs, and so on.
- Name string
-
- // Doc is the documentation for the analyzer.
- // The part before the first "\n\n" is the title
- // (no capital or period, max ~60 letters).
- Doc string
-
- // Flags defines any flags accepted by the analyzer.
- // The manner in which these flags are exposed to the user
- // depends on the driver which runs the analyzer.
- Flags flag.FlagSet
-
- // Run applies the analyzer to a package.
- // It returns an error if the analyzer failed.
- //
- // On success, the Run function may return a result
- // computed by the Analyzer; its type must match ResultType.
- // The driver makes this result available as an input to
- // another Analyzer that depends directly on this one (see
- // Requires) when it analyzes the same package.
- //
- // To pass analysis results between packages (and thus
- // potentially between address spaces), use Facts, which are
- // serializable.
- Run func(*Pass) (interface{}, error)
-
- // RunDespiteErrors allows the driver to invoke
- // the Run method of this analyzer even on a
- // package that contains parse or type errors.
- RunDespiteErrors bool
-
- // Requires is a set of analyzers that must run successfully
- // before this one on a given package. This analyzer may inspect
- // the outputs produced by each analyzer in Requires.
- // The graph over analyzers implied by Requires edges must be acyclic.
- //
- // Requires establishes a "horizontal" dependency between
- // analysis passes (different analyzers, same package).
- Requires []*Analyzer
-
- // ResultType is the type of the optional result of the Run function.
- ResultType reflect.Type
-
- // FactTypes indicates that this analyzer imports and exports
- // Facts of the specified concrete types.
- // An analyzer that uses facts may assume that its import
- // dependencies have been similarly analyzed before it runs.
- // Facts must be pointers.
- //
- // FactTypes establishes a "vertical" dependency between
- // analysis passes (same analyzer, different packages).
- FactTypes []Fact
-}
-
-func (a *Analyzer) String() string { return a.Name }
-
-func init() {
- // Set the analysisinternal functions to be able to pass type errors
- // to the Pass type without modifying the go/analysis API.
- analysisinternal.SetTypeErrors = func(p interface{}, errors []types.Error) {
- p.(*Pass).typeErrors = errors
- }
- analysisinternal.GetTypeErrors = func(p interface{}) []types.Error {
- return p.(*Pass).typeErrors
- }
-}
-
-// A Pass provides information to the Run function that
-// applies a specific analyzer to a single Go package.
-//
-// It forms the interface between the analysis logic and the driver
-// program, and has both input and an output components.
-//
-// As in a compiler, one pass may depend on the result computed by another.
-//
-// The Run function should not call any of the Pass functions concurrently.
-type Pass struct {
- Analyzer *Analyzer // the identity of the current analyzer
-
- // syntax and type information
- Fset *token.FileSet // file position information
- Files []*ast.File // the abstract syntax tree of each file
- OtherFiles []string // names of non-Go files of this package
- Pkg *types.Package // type information about the package
- TypesInfo *types.Info // type information about the syntax trees
- TypesSizes types.Sizes // function for computing sizes of types
-
- // Report reports a Diagnostic, a finding about a specific location
- // in the analyzed source code such as a potential mistake.
- // It may be called by the Run function.
- Report func(Diagnostic)
-
- // ResultOf provides the inputs to this analysis pass, which are
- // the corresponding results of its prerequisite analyzers.
- // The map keys are the elements of Analysis.Required,
- // and the type of each corresponding value is the required
- // analysis's ResultType.
- ResultOf map[*Analyzer]interface{}
-
- // -- facts --
-
- // ImportObjectFact retrieves a fact associated with obj.
- // Given a value ptr of type *T, where *T satisfies Fact,
- // ImportObjectFact copies the value to *ptr.
- //
- // ImportObjectFact panics if called after the pass is complete.
- // ImportObjectFact is not concurrency-safe.
- ImportObjectFact func(obj types.Object, fact Fact) bool
-
- // ImportPackageFact retrieves a fact associated with package pkg,
- // which must be this package or one of its dependencies.
- // See comments for ImportObjectFact.
- ImportPackageFact func(pkg *types.Package, fact Fact) bool
-
- // ExportObjectFact associates a fact of type *T with the obj,
- // replacing any previous fact of that type.
- //
- // ExportObjectFact panics if it is called after the pass is
- // complete, or if obj does not belong to the package being analyzed.
- // ExportObjectFact is not concurrency-safe.
- ExportObjectFact func(obj types.Object, fact Fact)
-
- // ExportPackageFact associates a fact with the current package.
- // See comments for ExportObjectFact.
- ExportPackageFact func(fact Fact)
-
- // AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes
- // in unspecified order.
- // WARNING: This is an experimental API and may change in the future.
- AllPackageFacts func() []PackageFact
-
- // AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes
- // in unspecified order.
- // WARNING: This is an experimental API and may change in the future.
- AllObjectFacts func() []ObjectFact
-
- // typeErrors contains types.Errors that are associated with the pkg.
- typeErrors []types.Error
-
- /* Further fields may be added in future. */
- // For example, suggested or applied refactorings.
-}
-
-// PackageFact is a package together with an associated fact.
-// WARNING: This is an experimental API and may change in the future.
-type PackageFact struct {
- Package *types.Package
- Fact Fact
-}
-
-// ObjectFact is an object together with an associated fact.
-// WARNING: This is an experimental API and may change in the future.
-type ObjectFact struct {
- Object types.Object
- Fact Fact
-}
-
-// Reportf is a helper function that reports a Diagnostic using the
-// specified position and formatted error message.
-func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
- pass.Report(Diagnostic{Pos: pos, Message: msg})
-}
-
-// The Range interface provides a range. It's equivalent to and satisfied by
-// ast.Node.
-type Range interface {
- Pos() token.Pos // position of first character belonging to the node
- End() token.Pos // position of first character immediately after the node
-}
-
-// ReportRangef is a helper function that reports a Diagnostic using the
-// range provided. ast.Node values can be passed in as the range because
-// they satisfy the Range interface.
-func (pass *Pass) ReportRangef(rng Range, format string, args ...interface{}) {
- msg := fmt.Sprintf(format, args...)
- pass.Report(Diagnostic{Pos: rng.Pos(), End: rng.End(), Message: msg})
-}
-
-func (pass *Pass) String() string {
- return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path())
-}
-
-// A Fact is an intermediate fact produced during analysis.
-//
-// Each fact is associated with a named declaration (a types.Object) or
-// with a package as a whole. A single object or package may have
-// multiple associated facts, but only one of any particular fact type.
-//
-// A Fact represents a predicate such as "never returns", but does not
-// represent the subject of the predicate such as "function F" or "package P".
-//
-// Facts may be produced in one analysis pass and consumed by another
-// analysis pass even if these are in different address spaces.
-// If package P imports Q, all facts about Q produced during
-// analysis of that package will be available during later analysis of P.
-// Facts are analogous to type export data in a build system:
-// just as export data enables separate compilation of several passes,
-// facts enable "separate analysis".
-//
-// Each pass (a, p) starts with the set of facts produced by the
-// same analyzer a applied to the packages directly imported by p.
-// The analysis may add facts to the set, and they may be exported in turn.
-// An analysis's Run function may retrieve facts by calling
-// Pass.Import{Object,Package}Fact and update them using
-// Pass.Export{Object,Package}Fact.
-//
-// A fact is logically private to its Analysis. To pass values
-// between different analyzers, use the results mechanism;
-// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf.
-//
-// A Fact type must be a pointer.
-// Facts are encoded and decoded using encoding/gob.
-// A Fact may implement the GobEncoder/GobDecoder interfaces
-// to customize its encoding. Fact encoding should not fail.
-//
-// A Fact should not be modified once exported.
-type Fact interface {
- AFact() // dummy method to avoid type errors
-}
diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go
deleted file mode 100644
index 57eaf6faa2a..00000000000
--- a/vendor/golang.org/x/tools/go/analysis/diagnostic.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package analysis
-
-import "go/token"
-
-// A Diagnostic is a message associated with a source location or range.
-//
-// An Analyzer may return a variety of diagnostics; the optional Category,
-// which should be a constant, may be used to classify them.
-// It is primarily intended to make it easy to look up documentation.
-//
-// If End is provided, the diagnostic is specified to apply to the range between
-// Pos and End.
-type Diagnostic struct {
- Pos token.Pos
- End token.Pos // optional
- Category string // optional
- Message string
-
- // SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform
- // edits to a file that address the diagnostic.
- // TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic?
- // Diagnostics should not contain SuggestedFixes that overlap.
- // Experimental: This API is experimental and may change in the future.
- SuggestedFixes []SuggestedFix // optional
-
- // Experimental: This API is experimental and may change in the future.
- Related []RelatedInformation // optional
-}
-
-// RelatedInformation contains information related to a diagnostic.
-// For example, a diagnostic that flags duplicated declarations of a
-// variable may include one RelatedInformation per existing
-// declaration.
-type RelatedInformation struct {
- Pos token.Pos
- End token.Pos
- Message string
-}
-
-// A SuggestedFix is a code change associated with a Diagnostic that a user can choose
-// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged
-// by the diagnostic.
-// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix
-// should not contain edits for other packages.
-// Experimental: This API is experimental and may change in the future.
-type SuggestedFix struct {
- // A description for this suggested fix to be shown to a user deciding
- // whether to accept it.
- Message string
- TextEdits []TextEdit
-}
-
-// A TextEdit represents the replacement of the code between Pos and End with the new text.
-// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos.
-// Experimental: This API is experimental and may change in the future.
-type TextEdit struct {
- // For a pure insertion, End can either be set to Pos or token.NoPos.
- Pos token.Pos
- End token.Pos
- NewText []byte
-}
diff --git a/vendor/golang.org/x/tools/go/analysis/doc.go b/vendor/golang.org/x/tools/go/analysis/doc.go
deleted file mode 100644
index fb17a0e4154..00000000000
--- a/vendor/golang.org/x/tools/go/analysis/doc.go
+++ /dev/null
@@ -1,310 +0,0 @@
-/*
-
-Package analysis defines the interface between a modular static
-analysis and an analysis driver program.
-
-
-Background
-
-A static analysis is a function that inspects a package of Go code and
-reports a set of diagnostics (typically mistakes in the code), and
-perhaps produces other results as well, such as suggested refactorings
-or other facts. An analysis that reports mistakes is informally called a
-"checker". For example, the printf checker reports mistakes in
-fmt.Printf format strings.
-
-A "modular" analysis is one that inspects one package at a time but can
-save information from a lower-level package and use it when inspecting a
-higher-level package, analogous to separate compilation in a toolchain.
-The printf checker is modular: when it discovers that a function such as
-log.Fatalf delegates to fmt.Printf, it records this fact, and checks
-calls to that function too, including calls made from another package.
-
-By implementing a common interface, checkers from a variety of sources
-can be easily selected, incorporated, and reused in a wide range of
-driver programs including command-line tools (such as vet), text editors and
-IDEs, build and test systems (such as go build, Bazel, or Buck), test
-frameworks, code review tools, code-base indexers (such as SourceGraph),
-documentation viewers (such as godoc), batch pipelines for large code
-bases, and so on.
-
-
-Analyzer
-
-The primary type in the API is Analyzer. An Analyzer statically
-describes an analysis function: its name, documentation, flags,
-relationship to other analyzers, and of course, its logic.
-
-To define an analysis, a user declares a (logically constant) variable
-of type Analyzer. Here is a typical example from one of the analyzers in
-the go/analysis/passes/ subdirectory:
-
- package unusedresult
-
- var Analyzer = &analysis.Analyzer{
- Name: "unusedresult",
- Doc: "check for unused results of calls to some functions",
- Run: run,
- ...
- }
-
- func run(pass *analysis.Pass) (interface{}, error) {
- ...
- }
-
-An analysis driver is a program such as vet that runs a set of
-analyses and prints the diagnostics that they report.
-The driver program must import the list of Analyzers it needs.
-Typically each Analyzer resides in a separate package.
-To add a new Analyzer to an existing driver, add another item to the list:
-
- import ( "unusedresult"; "nilness"; "printf" )
-
- var analyses = []*analysis.Analyzer{
- unusedresult.Analyzer,
- nilness.Analyzer,
- printf.Analyzer,
- }
-
-A driver may use the name, flags, and documentation to provide on-line
-help that describes the analyses it performs.
-The doc comment contains a brief one-line summary,
-optionally followed by paragraphs of explanation.
-
-The Analyzer type has more fields besides those shown above:
-
- type Analyzer struct {
- Name string
- Doc string
- Flags flag.FlagSet
- Run func(*Pass) (interface{}, error)
- RunDespiteErrors bool
- ResultType reflect.Type
- Requires []*Analyzer
- FactTypes []Fact
- }
-
-The Flags field declares a set of named (global) flag variables that
-control analysis behavior. Unlike vet, analysis flags are not declared
-directly in the command line FlagSet; it is up to the driver to set the
-flag variables. A driver for a single analysis, a, might expose its flag
-f directly on the command line as -f, whereas a driver for multiple
-analyses might prefix the flag name by the analysis name (-a.f) to avoid
-ambiguity. An IDE might expose the flags through a graphical interface,
-and a batch pipeline might configure them from a config file.
-See the "findcall" analyzer for an example of flags in action.
-
-The RunDespiteErrors flag indicates whether the analysis is equipped to
-handle ill-typed code. If not, the driver will skip the analysis if
-there were parse or type errors.
-The optional ResultType field specifies the type of the result value
-computed by this analysis and made available to other analyses.
-The Requires field specifies a list of analyses upon which
-this one depends and whose results it may access, and it constrains the
-order in which a driver may run analyses.
-The FactTypes field is discussed in the section on Modularity.
-The analysis package provides a Validate function to perform basic
-sanity checks on an Analyzer, such as that its Requires graph is
-acyclic, its fact and result types are unique, and so on.
-
-Finally, the Run field contains a function to be called by the driver to
-execute the analysis on a single package. The driver passes it an
-instance of the Pass type.
-
-
-Pass
-
-A Pass describes a single unit of work: the application of a particular
-Analyzer to a particular package of Go code.
-The Pass provides information to the Analyzer's Run function about the
-package being analyzed, and provides operations to the Run function for
-reporting diagnostics and other information back to the driver.
-
- type Pass struct {
- Fset *token.FileSet
- Files []*ast.File
- OtherFiles []string
- Pkg *types.Package
- TypesInfo *types.Info
- ResultOf map[*Analyzer]interface{}
- Report func(Diagnostic)
- ...
- }
-
-The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees,
-type information, and source positions for a single package of Go code.
-
-The OtherFiles field provides the names, but not the contents, of non-Go
-files such as assembly that are part of this package. See the "asmdecl"
-or "buildtags" analyzers for examples of loading non-Go files and reporting
-diagnostics against them.
-
-The ResultOf field provides the results computed by the analyzers
-required by this one, as expressed in its Analyzer.Requires field. The
-driver runs the required analyzers first and makes their results
-available in this map. Each Analyzer must return a value of the type
-described in its Analyzer.ResultType field.
-For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which
-provides a control-flow graph for each function in the package (see
-golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that
-enables other Analyzers to traverse the syntax trees of the package more
-efficiently; and the "buildssa" analyzer constructs an SSA-form
-intermediate representation.
-Each of these Analyzers extends the capabilities of later Analyzers
-without adding a dependency to the core API, so an analysis tool pays
-only for the extensions it needs.
-
-The Report function emits a diagnostic, a message associated with a
-source position. For most analyses, diagnostics are their primary
-result.
-For convenience, Pass provides a helper method, Reportf, to report a new
-diagnostic by formatting a string.
-Diagnostic is defined as:
-
- type Diagnostic struct {
- Pos token.Pos
- Category string // optional
- Message string
- }
-
-The optional Category field is a short identifier that classifies the
-kind of message when an analysis produces several kinds of diagnostic.
-
-Many analyses want to associate diagnostics with a severity level.
-Because Diagnostic does not have a severity level field, an Analyzer's
-diagnostics effectively all have the same severity level. To separate which
-diagnostics are high severity and which are low severity, expose multiple
-Analyzers instead. Analyzers should also be separated when their
-diagnostics belong in different groups, or could be tagged differently
-before being shown to the end user. Analyzers should document their severity
-level to help downstream tools surface diagnostics properly.
-
-Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl
-and buildtag, inspect the raw text of Go source files or even non-Go
-files such as assembly. To report a diagnostic against a line of a
-raw text file, use the following sequence:
-
- content, err := ioutil.ReadFile(filename)
- if err != nil { ... }
- tf := fset.AddFile(filename, -1, len(content))
- tf.SetLinesForContent(content)
- ...
- pass.Reportf(tf.LineStart(line), "oops")
-
-
-Modular analysis with Facts
-
-To improve efficiency and scalability, large programs are routinely
-built using separate compilation: units of the program are compiled
-separately, and recompiled only when one of their dependencies changes;
-independent modules may be compiled in parallel. The same technique may
-be applied to static analyses, for the same benefits. Such analyses are
-described as "modular".
-
-A compiler’s type checker is an example of a modular static analysis.
-Many other checkers we would like to apply to Go programs can be
-understood as alternative or non-standard type systems. For example,
-vet's printf checker infers whether a function has the "printf wrapper"
-type, and it applies stricter checks to calls of such functions. In
-addition, it records which functions are printf wrappers for use by
-later analysis passes to identify other printf wrappers by induction.
-A result such as “f is a printf wrapper” that is not interesting by
-itself but serves as a stepping stone to an interesting result (such as
-a diagnostic) is called a "fact".
-
-The analysis API allows an analysis to define new types of facts, to
-associate facts of these types with objects (named entities) declared
-within the current package, or with the package as a whole, and to query
-for an existing fact of a given type associated with an object or
-package.
-
-An Analyzer that uses facts must declare their types:
-
- var Analyzer = &analysis.Analyzer{
- Name: "printf",
- FactTypes: []analysis.Fact{new(isWrapper)},
- ...
- }
-
- type isWrapper struct{} // => *types.Func f “is a printf wrapper”
-
-The driver program ensures that facts for a pass’s dependencies are
-generated before analyzing the package and is responsible for propagating
-facts from one package to another, possibly across address spaces.
-Consequently, Facts must be serializable. The API requires that drivers
-use the gob encoding, an efficient, robust, self-describing binary
-protocol. A fact type may implement the GobEncoder/GobDecoder interfaces
-if the default encoding is unsuitable. Facts should be stateless.
-
-The Pass type has functions to import and export facts,
-associated either with an object or with a package:
-
- type Pass struct {
- ...
- ExportObjectFact func(types.Object, Fact)
- ImportObjectFact func(types.Object, Fact) bool
-
- ExportPackageFact func(fact Fact)
- ImportPackageFact func(*types.Package, Fact) bool
- }
-
-An Analyzer may only export facts associated with the current package or
-its objects, though it may import facts from any package or object that
-is an import dependency of the current package.
-
-Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by
-the pair (obj, TypeOf(fact)), and the ImportObjectFact function
-retrieves the entry from this map and copies its value into the variable
-pointed to by fact. This scheme assumes that the concrete type of fact
-is a pointer; this assumption is checked by the Validate function.
-See the "printf" analyzer for an example of object facts in action.
-
-Some driver implementations (such as those based on Bazel and Blaze) do
-not currently apply analyzers to packages of the standard library.
-Therefore, for best results, analyzer authors should not rely on
-analysis facts being available for standard packages.
-For example, although the printf checker is capable of deducing during
-analysis of the log package that log.Printf is a printf wrapper,
-this fact is built in to the analyzer so that it correctly checks
-calls to log.Printf even when run in a driver that does not apply
-it to standard packages. We would like to remove this limitation in future.
-
-
-Testing an Analyzer
-
-The analysistest subpackage provides utilities for testing an Analyzer.
-In a few lines of code, it is possible to run an analyzer on a package
-of testdata files and check that it reported all the expected
-diagnostics and facts (and no more). Expectations are expressed using
-"// want ..." comments in the input code.
-
-
-Standalone commands
-
-Analyzers are provided in the form of packages that a driver program is
-expected to import. The vet command imports a set of several analyzers,
-but users may wish to define their own analysis commands that perform
-additional checks. To simplify the task of creating an analysis command,
-either for a single analyzer or for a whole suite, we provide the
-singlechecker and multichecker subpackages.
-
-The singlechecker package provides the main function for a command that
-runs one analyzer. By convention, each analyzer such as
-go/passes/findcall should be accompanied by a singlechecker-based
-command such as go/analysis/passes/findcall/cmd/findcall, defined in its
-entirety as:
-
- package main
-
- import (
- "golang.org/x/tools/go/analysis/passes/findcall"
- "golang.org/x/tools/go/analysis/singlechecker"
- )
-
- func main() { singlechecker.Main(findcall.Analyzer) }
-
-A tool that provides multiple analyzers can use multichecker in a
-similar way, giving it the list of Analyzers.
-
-*/
-package analysis
diff --git a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go b/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go
deleted file mode 100644
index 2856df137c5..00000000000
--- a/vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package inspect defines an Analyzer that provides an AST inspector
-// (golang.org/x/tools/go/ast/inspect.Inspect) for the syntax trees of a
-// package. It is only a building block for other analyzers.
-//
-// Example of use in another analysis:
-//
-// import (
-// "golang.org/x/tools/go/analysis"
-// "golang.org/x/tools/go/analysis/passes/inspect"
-// "golang.org/x/tools/go/ast/inspector"
-// )
-//
-// var Analyzer = &analysis.Analyzer{
-// ...
-// Requires: []*analysis.Analyzer{inspect.Analyzer},
-// }
-//
-// func run(pass *analysis.Pass) (interface{}, error) {
-// inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector)
-// inspect.Preorder(nil, func(n ast.Node) {
-// ...
-// })
-// return nil
-// }
-//
-package inspect
-
-import (
- "reflect"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/ast/inspector"
-)
-
-var Analyzer = &analysis.Analyzer{
- Name: "inspect",
- Doc: "optimize AST traversal for later passes",
- Run: run,
- RunDespiteErrors: true,
- ResultType: reflect.TypeOf(new(inspector.Inspector)),
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- return inspector.New(pass.Files), nil
-}
diff --git a/vendor/golang.org/x/tools/go/analysis/validate.go b/vendor/golang.org/x/tools/go/analysis/validate.go
deleted file mode 100644
index be98143461e..00000000000
--- a/vendor/golang.org/x/tools/go/analysis/validate.go
+++ /dev/null
@@ -1,97 +0,0 @@
-package analysis
-
-import (
- "fmt"
- "reflect"
- "unicode"
-)
-
-// Validate reports an error if any of the analyzers are misconfigured.
-// Checks include:
-// that the name is a valid identifier;
-// that the Requires graph is acyclic;
-// that analyzer fact types are unique;
-// that each fact type is a pointer.
-func Validate(analyzers []*Analyzer) error {
- // Map each fact type to its sole generating analyzer.
- factTypes := make(map[reflect.Type]*Analyzer)
-
- // Traverse the Requires graph, depth first.
- const (
- white = iota
- grey
- black
- finished
- )
- color := make(map[*Analyzer]uint8)
- var visit func(a *Analyzer) error
- visit = func(a *Analyzer) error {
- if a == nil {
- return fmt.Errorf("nil *Analyzer")
- }
- if color[a] == white {
- color[a] = grey
-
- // names
- if !validIdent(a.Name) {
- return fmt.Errorf("invalid analyzer name %q", a)
- }
-
- if a.Doc == "" {
- return fmt.Errorf("analyzer %q is undocumented", a)
- }
-
- // fact types
- for _, f := range a.FactTypes {
- if f == nil {
- return fmt.Errorf("analyzer %s has nil FactType", a)
- }
- t := reflect.TypeOf(f)
- if prev := factTypes[t]; prev != nil {
- return fmt.Errorf("fact type %s registered by two analyzers: %v, %v",
- t, a, prev)
- }
- if t.Kind() != reflect.Ptr {
- return fmt.Errorf("%s: fact type %s is not a pointer", a, t)
- }
- factTypes[t] = a
- }
-
- // recursion
- for i, req := range a.Requires {
- if err := visit(req); err != nil {
- return fmt.Errorf("%s.Requires[%d]: %v", a.Name, i, err)
- }
- }
- color[a] = black
- }
-
- return nil
- }
- for _, a := range analyzers {
- if err := visit(a); err != nil {
- return err
- }
- }
-
- // Reject duplicates among analyzers.
- // Precondition: color[a] == black.
- // Postcondition: color[a] == finished.
- for _, a := range analyzers {
- if color[a] == finished {
- return fmt.Errorf("duplicate analyzer: %s", a.Name)
- }
- color[a] = finished
- }
-
- return nil
-}
-
-func validIdent(name string) bool {
- for i, r := range name {
- if !(r == '_' || unicode.IsLetter(r) || i > 0 && unicode.IsDigit(r)) {
- return false
- }
- }
- return name != ""
-}
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go b/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
deleted file mode 100644
index af5e17feeea..00000000000
--- a/vendor/golang.org/x/tools/go/ast/inspector/inspector.go
+++ /dev/null
@@ -1,186 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package inspector provides helper functions for traversal over the
-// syntax trees of a package, including node filtering by type, and
-// materialization of the traversal stack.
-//
-// During construction, the inspector does a complete traversal and
-// builds a list of push/pop events and their node type. Subsequent
-// method calls that request a traversal scan this list, rather than walk
-// the AST, and perform type filtering using efficient bit sets.
-//
-// Experiments suggest the inspector's traversals are about 2.5x faster
-// than ast.Inspect, but it may take around 5 traversals for this
-// benefit to amortize the inspector's construction cost.
-// If efficiency is the primary concern, do not use Inspector for
-// one-off traversals.
-package inspector
-
-// There are four orthogonal features in a traversal:
-// 1 type filtering
-// 2 pruning
-// 3 postorder calls to f
-// 4 stack
-// Rather than offer all of them in the API,
-// only a few combinations are exposed:
-// - Preorder is the fastest and has fewest features,
-// but is the most commonly needed traversal.
-// - Nodes and WithStack both provide pruning and postorder calls,
-// even though few clients need it, because supporting two versions
-// is not justified.
-// More combinations could be supported by expressing them as
-// wrappers around a more generic traversal, but this was measured
-// and found to degrade performance significantly (30%).
-
-import (
- "go/ast"
-)
-
-// An Inspector provides methods for inspecting
-// (traversing) the syntax trees of a package.
-type Inspector struct {
- events []event
-}
-
-// New returns an Inspector for the specified syntax trees.
-func New(files []*ast.File) *Inspector {
- return &Inspector{traverse(files)}
-}
-
-// An event represents a push or a pop
-// of an ast.Node during a traversal.
-type event struct {
- node ast.Node
- typ uint64 // typeOf(node)
- index int // 1 + index of corresponding pop event, or 0 if this is a pop
-}
-
-// Preorder visits all the nodes of the files supplied to New in
-// depth-first order. It calls f(n) for each node n before it visits
-// n's children.
-//
-// The types argument, if non-empty, enables type-based filtering of
-// events. The function f if is called only for nodes whose type
-// matches an element of the types slice.
-func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) {
- // Because it avoids postorder calls to f, and the pruning
- // check, Preorder is almost twice as fast as Nodes. The two
- // features seem to contribute similar slowdowns (~1.4x each).
-
- mask := maskOf(types)
- for i := 0; i < len(in.events); {
- ev := in.events[i]
- if ev.typ&mask != 0 {
- if ev.index > 0 {
- f(ev.node)
- }
- }
- i++
- }
-}
-
-// Nodes visits the nodes of the files supplied to New in depth-first
-// order. It calls f(n, true) for each node n before it visits n's
-// children. If f returns true, Nodes invokes f recursively for each
-// of the non-nil children of the node, followed by a call of
-// f(n, false).
-//
-// The types argument, if non-empty, enables type-based filtering of
-// events. The function f if is called only for nodes whose type
-// matches an element of the types slice.
-func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (proceed bool)) {
- mask := maskOf(types)
- for i := 0; i < len(in.events); {
- ev := in.events[i]
- if ev.typ&mask != 0 {
- if ev.index > 0 {
- // push
- if !f(ev.node, true) {
- i = ev.index // jump to corresponding pop + 1
- continue
- }
- } else {
- // pop
- f(ev.node, false)
- }
- }
- i++
- }
-}
-
-// WithStack visits nodes in a similar manner to Nodes, but it
-// supplies each call to f an additional argument, the current
-// traversal stack. The stack's first element is the outermost node,
-// an *ast.File; its last is the innermost, n.
-func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (proceed bool)) {
- mask := maskOf(types)
- var stack []ast.Node
- for i := 0; i < len(in.events); {
- ev := in.events[i]
- if ev.index > 0 {
- // push
- stack = append(stack, ev.node)
- if ev.typ&mask != 0 {
- if !f(ev.node, true, stack) {
- i = ev.index
- stack = stack[:len(stack)-1]
- continue
- }
- }
- } else {
- // pop
- if ev.typ&mask != 0 {
- f(ev.node, false, stack)
- }
- stack = stack[:len(stack)-1]
- }
- i++
- }
-}
-
-// traverse builds the table of events representing a traversal.
-func traverse(files []*ast.File) []event {
- // Preallocate approximate number of events
- // based on source file extent.
- // This makes traverse faster by 4x (!).
- var extent int
- for _, f := range files {
- extent += int(f.End() - f.Pos())
- }
- // This estimate is based on the net/http package.
- capacity := extent * 33 / 100
- if capacity > 1e6 {
- capacity = 1e6 // impose some reasonable maximum
- }
- events := make([]event, 0, capacity)
-
- var stack []event
- for _, f := range files {
- ast.Inspect(f, func(n ast.Node) bool {
- if n != nil {
- // push
- ev := event{
- node: n,
- typ: typeOf(n),
- index: len(events), // push event temporarily holds own index
- }
- stack = append(stack, ev)
- events = append(events, ev)
- } else {
- // pop
- ev := stack[len(stack)-1]
- stack = stack[:len(stack)-1]
-
- events[ev.index].index = len(events) + 1 // make push refer to pop
-
- ev.index = 0 // turn ev into a pop event
- events = append(events, ev)
- }
- return true
- })
- }
-
- return events
-}
diff --git a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go b/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
deleted file mode 100644
index d61301b133d..00000000000
--- a/vendor/golang.org/x/tools/go/ast/inspector/typeof.go
+++ /dev/null
@@ -1,216 +0,0 @@
-package inspector
-
-// This file defines func typeOf(ast.Node) uint64.
-//
-// The initial map-based implementation was too slow;
-// see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196
-
-import "go/ast"
-
-const (
- nArrayType = iota
- nAssignStmt
- nBadDecl
- nBadExpr
- nBadStmt
- nBasicLit
- nBinaryExpr
- nBlockStmt
- nBranchStmt
- nCallExpr
- nCaseClause
- nChanType
- nCommClause
- nComment
- nCommentGroup
- nCompositeLit
- nDeclStmt
- nDeferStmt
- nEllipsis
- nEmptyStmt
- nExprStmt
- nField
- nFieldList
- nFile
- nForStmt
- nFuncDecl
- nFuncLit
- nFuncType
- nGenDecl
- nGoStmt
- nIdent
- nIfStmt
- nImportSpec
- nIncDecStmt
- nIndexExpr
- nInterfaceType
- nKeyValueExpr
- nLabeledStmt
- nMapType
- nPackage
- nParenExpr
- nRangeStmt
- nReturnStmt
- nSelectStmt
- nSelectorExpr
- nSendStmt
- nSliceExpr
- nStarExpr
- nStructType
- nSwitchStmt
- nTypeAssertExpr
- nTypeSpec
- nTypeSwitchStmt
- nUnaryExpr
- nValueSpec
-)
-
-// typeOf returns a distinct single-bit value that represents the type of n.
-//
-// Various implementations were benchmarked with BenchmarkNewInspector:
-// GOGC=off
-// - type switch 4.9-5.5ms 2.1ms
-// - binary search over a sorted list of types 5.5-5.9ms 2.5ms
-// - linear scan, frequency-ordered list 5.9-6.1ms 2.7ms
-// - linear scan, unordered list 6.4ms 2.7ms
-// - hash table 6.5ms 3.1ms
-// A perfect hash seemed like overkill.
-//
-// The compiler's switch statement is the clear winner
-// as it produces a binary tree in code,
-// with constant conditions and good branch prediction.
-// (Sadly it is the most verbose in source code.)
-// Binary search suffered from poor branch prediction.
-//
-func typeOf(n ast.Node) uint64 {
- // Fast path: nearly half of all nodes are identifiers.
- if _, ok := n.(*ast.Ident); ok {
- return 1 << nIdent
- }
-
- // These cases include all nodes encountered by ast.Inspect.
- switch n.(type) {
- case *ast.ArrayType:
- return 1 << nArrayType
- case *ast.AssignStmt:
- return 1 << nAssignStmt
- case *ast.BadDecl:
- return 1 << nBadDecl
- case *ast.BadExpr:
- return 1 << nBadExpr
- case *ast.BadStmt:
- return 1 << nBadStmt
- case *ast.BasicLit:
- return 1 << nBasicLit
- case *ast.BinaryExpr:
- return 1 << nBinaryExpr
- case *ast.BlockStmt:
- return 1 << nBlockStmt
- case *ast.BranchStmt:
- return 1 << nBranchStmt
- case *ast.CallExpr:
- return 1 << nCallExpr
- case *ast.CaseClause:
- return 1 << nCaseClause
- case *ast.ChanType:
- return 1 << nChanType
- case *ast.CommClause:
- return 1 << nCommClause
- case *ast.Comment:
- return 1 << nComment
- case *ast.CommentGroup:
- return 1 << nCommentGroup
- case *ast.CompositeLit:
- return 1 << nCompositeLit
- case *ast.DeclStmt:
- return 1 << nDeclStmt
- case *ast.DeferStmt:
- return 1 << nDeferStmt
- case *ast.Ellipsis:
- return 1 << nEllipsis
- case *ast.EmptyStmt:
- return 1 << nEmptyStmt
- case *ast.ExprStmt:
- return 1 << nExprStmt
- case *ast.Field:
- return 1 << nField
- case *ast.FieldList:
- return 1 << nFieldList
- case *ast.File:
- return 1 << nFile
- case *ast.ForStmt:
- return 1 << nForStmt
- case *ast.FuncDecl:
- return 1 << nFuncDecl
- case *ast.FuncLit:
- return 1 << nFuncLit
- case *ast.FuncType:
- return 1 << nFuncType
- case *ast.GenDecl:
- return 1 << nGenDecl
- case *ast.GoStmt:
- return 1 << nGoStmt
- case *ast.Ident:
- return 1 << nIdent
- case *ast.IfStmt:
- return 1 << nIfStmt
- case *ast.ImportSpec:
- return 1 << nImportSpec
- case *ast.IncDecStmt:
- return 1 << nIncDecStmt
- case *ast.IndexExpr:
- return 1 << nIndexExpr
- case *ast.InterfaceType:
- return 1 << nInterfaceType
- case *ast.KeyValueExpr:
- return 1 << nKeyValueExpr
- case *ast.LabeledStmt:
- return 1 << nLabeledStmt
- case *ast.MapType:
- return 1 << nMapType
- case *ast.Package:
- return 1 << nPackage
- case *ast.ParenExpr:
- return 1 << nParenExpr
- case *ast.RangeStmt:
- return 1 << nRangeStmt
- case *ast.ReturnStmt:
- return 1 << nReturnStmt
- case *ast.SelectStmt:
- return 1 << nSelectStmt
- case *ast.SelectorExpr:
- return 1 << nSelectorExpr
- case *ast.SendStmt:
- return 1 << nSendStmt
- case *ast.SliceExpr:
- return 1 << nSliceExpr
- case *ast.StarExpr:
- return 1 << nStarExpr
- case *ast.StructType:
- return 1 << nStructType
- case *ast.SwitchStmt:
- return 1 << nSwitchStmt
- case *ast.TypeAssertExpr:
- return 1 << nTypeAssertExpr
- case *ast.TypeSpec:
- return 1 << nTypeSpec
- case *ast.TypeSwitchStmt:
- return 1 << nTypeSwitchStmt
- case *ast.UnaryExpr:
- return 1 << nUnaryExpr
- case *ast.ValueSpec:
- return 1 << nValueSpec
- }
- return 0
-}
-
-func maskOf(nodes []ast.Node) uint64 {
- if nodes == nil {
- return 1<<64 - 1 // match all node types
- }
- var mask uint64
- for _, n := range nodes {
- mask |= typeOf(n)
- }
- return mask
-}
diff --git a/vendor/golang.org/x/tools/go/buildutil/allpackages.go b/vendor/golang.org/x/tools/go/buildutil/allpackages.go
deleted file mode 100644
index c0cb03e7bee..00000000000
--- a/vendor/golang.org/x/tools/go/buildutil/allpackages.go
+++ /dev/null
@@ -1,198 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package buildutil provides utilities related to the go/build
-// package in the standard library.
-//
-// All I/O is done via the build.Context file system interface, which must
-// be concurrency-safe.
-package buildutil // import "golang.org/x/tools/go/buildutil"
-
-import (
- "go/build"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "sync"
-)
-
-// AllPackages returns the package path of each Go package in any source
-// directory of the specified build context (e.g. $GOROOT or an element
-// of $GOPATH). Errors are ignored. The results are sorted.
-// All package paths are canonical, and thus may contain "/vendor/".
-//
-// The result may include import paths for directories that contain no
-// *.go files, such as "archive" (in $GOROOT/src).
-//
-// All I/O is done via the build.Context file system interface,
-// which must be concurrency-safe.
-//
-func AllPackages(ctxt *build.Context) []string {
- var list []string
- ForEachPackage(ctxt, func(pkg string, _ error) {
- list = append(list, pkg)
- })
- sort.Strings(list)
- return list
-}
-
-// ForEachPackage calls the found function with the package path of
-// each Go package it finds in any source directory of the specified
-// build context (e.g. $GOROOT or an element of $GOPATH).
-// All package paths are canonical, and thus may contain "/vendor/".
-//
-// If the package directory exists but could not be read, the second
-// argument to the found function provides the error.
-//
-// All I/O is done via the build.Context file system interface,
-// which must be concurrency-safe.
-//
-func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) {
- ch := make(chan item)
-
- var wg sync.WaitGroup
- for _, root := range ctxt.SrcDirs() {
- root := root
- wg.Add(1)
- go func() {
- allPackages(ctxt, root, ch)
- wg.Done()
- }()
- }
- go func() {
- wg.Wait()
- close(ch)
- }()
-
- // All calls to found occur in the caller's goroutine.
- for i := range ch {
- found(i.importPath, i.err)
- }
-}
-
-type item struct {
- importPath string
- err error // (optional)
-}
-
-// We use a process-wide counting semaphore to limit
-// the number of parallel calls to ReadDir.
-var ioLimit = make(chan bool, 20)
-
-func allPackages(ctxt *build.Context, root string, ch chan<- item) {
- root = filepath.Clean(root) + string(os.PathSeparator)
-
- var wg sync.WaitGroup
-
- var walkDir func(dir string)
- walkDir = func(dir string) {
- // Avoid .foo, _foo, and testdata directory trees.
- base := filepath.Base(dir)
- if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
- return
- }
-
- pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))
-
- // Prune search if we encounter any of these import paths.
- switch pkg {
- case "builtin":
- return
- }
-
- ioLimit <- true
- files, err := ReadDir(ctxt, dir)
- <-ioLimit
- if pkg != "" || err != nil {
- ch <- item{pkg, err}
- }
- for _, fi := range files {
- fi := fi
- if fi.IsDir() {
- wg.Add(1)
- go func() {
- walkDir(filepath.Join(dir, fi.Name()))
- wg.Done()
- }()
- }
- }
- }
-
- walkDir(root)
- wg.Wait()
-}
-
-// ExpandPatterns returns the set of packages matched by patterns,
-// which may have the following forms:
-//
-// golang.org/x/tools/cmd/guru # a single package
-// golang.org/x/tools/... # all packages beneath dir
-// ... # the entire workspace.
-//
-// Order is significant: a pattern preceded by '-' removes matching
-// packages from the set. For example, these patterns match all encoding
-// packages except encoding/xml:
-//
-// encoding/... -encoding/xml
-//
-// A trailing slash in a pattern is ignored. (Path components of Go
-// package names are separated by slash, not the platform's path separator.)
-//
-func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool {
- // TODO(adonovan): support other features of 'go list':
- // - "std"/"cmd"/"all" meta-packages
- // - "..." not at the end of a pattern
- // - relative patterns using "./" or "../" prefix
-
- pkgs := make(map[string]bool)
- doPkg := func(pkg string, neg bool) {
- if neg {
- delete(pkgs, pkg)
- } else {
- pkgs[pkg] = true
- }
- }
-
- // Scan entire workspace if wildcards are present.
- // TODO(adonovan): opt: scan only the necessary subtrees of the workspace.
- var all []string
- for _, arg := range patterns {
- if strings.HasSuffix(arg, "...") {
- all = AllPackages(ctxt)
- break
- }
- }
-
- for _, arg := range patterns {
- if arg == "" {
- continue
- }
-
- neg := arg[0] == '-'
- if neg {
- arg = arg[1:]
- }
-
- if arg == "..." {
- // ... matches all packages
- for _, pkg := range all {
- doPkg(pkg, neg)
- }
- } else if dir := strings.TrimSuffix(arg, "/..."); dir != arg {
- // dir/... matches all packages beneath dir
- for _, pkg := range all {
- if strings.HasPrefix(pkg, dir) &&
- (len(pkg) == len(dir) || pkg[len(dir)] == '/') {
- doPkg(pkg, neg)
- }
- }
- } else {
- // single package
- doPkg(strings.TrimSuffix(arg, "/"), neg)
- }
- }
-
- return pkgs
-}
diff --git a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go b/vendor/golang.org/x/tools/go/buildutil/fakecontext.go
deleted file mode 100644
index 8b7f066739f..00000000000
--- a/vendor/golang.org/x/tools/go/buildutil/fakecontext.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package buildutil
-
-import (
- "fmt"
- "go/build"
- "io"
- "io/ioutil"
- "os"
- "path"
- "path/filepath"
- "sort"
- "strings"
- "time"
-)
-
-// FakeContext returns a build.Context for the fake file tree specified
-// by pkgs, which maps package import paths to a mapping from file base
-// names to contents.
-//
-// The fake Context has a GOROOT of "/go" and no GOPATH, and overrides
-// the necessary file access methods to read from memory instead of the
-// real file system.
-//
-// Unlike a real file tree, the fake one has only two levels---packages
-// and files---so ReadDir("/go/src/") returns all packages under
-// /go/src/ including, for instance, "math" and "math/big".
-// ReadDir("/go/src/math/big") would return all the files in the
-// "math/big" package.
-//
-func FakeContext(pkgs map[string]map[string]string) *build.Context {
- clean := func(filename string) string {
- f := path.Clean(filepath.ToSlash(filename))
- // Removing "/go/src" while respecting segment
- // boundaries has this unfortunate corner case:
- if f == "/go/src" {
- return ""
- }
- return strings.TrimPrefix(f, "/go/src/")
- }
-
- ctxt := build.Default // copy
- ctxt.GOROOT = "/go"
- ctxt.GOPATH = ""
- ctxt.Compiler = "gc"
- ctxt.IsDir = func(dir string) bool {
- dir = clean(dir)
- if dir == "" {
- return true // needed by (*build.Context).SrcDirs
- }
- return pkgs[dir] != nil
- }
- ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
- dir = clean(dir)
- var fis []os.FileInfo
- if dir == "" {
- // enumerate packages
- for importPath := range pkgs {
- fis = append(fis, fakeDirInfo(importPath))
- }
- } else {
- // enumerate files of package
- for basename := range pkgs[dir] {
- fis = append(fis, fakeFileInfo(basename))
- }
- }
- sort.Sort(byName(fis))
- return fis, nil
- }
- ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
- filename = clean(filename)
- dir, base := path.Split(filename)
- content, ok := pkgs[path.Clean(dir)][base]
- if !ok {
- return nil, fmt.Errorf("file not found: %s", filename)
- }
- return ioutil.NopCloser(strings.NewReader(content)), nil
- }
- ctxt.IsAbsPath = func(path string) bool {
- path = filepath.ToSlash(path)
- // Don't rely on the default (filepath.Path) since on
- // Windows, it reports virtual paths as non-absolute.
- return strings.HasPrefix(path, "/")
- }
- return &ctxt
-}
-
-type byName []os.FileInfo
-
-func (s byName) Len() int { return len(s) }
-func (s byName) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
-func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
-
-type fakeFileInfo string
-
-func (fi fakeFileInfo) Name() string { return string(fi) }
-func (fakeFileInfo) Sys() interface{} { return nil }
-func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
-func (fakeFileInfo) IsDir() bool { return false }
-func (fakeFileInfo) Size() int64 { return 0 }
-func (fakeFileInfo) Mode() os.FileMode { return 0644 }
-
-type fakeDirInfo string
-
-func (fd fakeDirInfo) Name() string { return string(fd) }
-func (fakeDirInfo) Sys() interface{} { return nil }
-func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
-func (fakeDirInfo) IsDir() bool { return true }
-func (fakeDirInfo) Size() int64 { return 0 }
-func (fakeDirInfo) Mode() os.FileMode { return 0755 }
diff --git a/vendor/golang.org/x/tools/go/buildutil/overlay.go b/vendor/golang.org/x/tools/go/buildutil/overlay.go
deleted file mode 100644
index 8e239086bd4..00000000000
--- a/vendor/golang.org/x/tools/go/buildutil/overlay.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package buildutil
-
-import (
- "bufio"
- "bytes"
- "fmt"
- "go/build"
- "io"
- "io/ioutil"
- "path/filepath"
- "strconv"
- "strings"
-)
-
-// OverlayContext overlays a build.Context with additional files from
-// a map. Files in the map take precedence over other files.
-//
-// In addition to plain string comparison, two file names are
-// considered equal if their base names match and their directory
-// components point at the same directory on the file system. That is,
-// symbolic links are followed for directories, but not files.
-//
-// A common use case for OverlayContext is to allow editors to pass in
-// a set of unsaved, modified files.
-//
-// Currently, only the Context.OpenFile function will respect the
-// overlay. This may change in the future.
-func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context {
- // TODO(dominikh): Implement IsDir, HasSubdir and ReadDir
-
- rc := func(data []byte) (io.ReadCloser, error) {
- return ioutil.NopCloser(bytes.NewBuffer(data)), nil
- }
-
- copy := *orig // make a copy
- ctxt := ©
- ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
- // Fast path: names match exactly.
- if content, ok := overlay[path]; ok {
- return rc(content)
- }
-
- // Slow path: check for same file under a different
- // alias, perhaps due to a symbolic link.
- for filename, content := range overlay {
- if sameFile(path, filename) {
- return rc(content)
- }
- }
-
- return OpenFile(orig, path)
- }
- return ctxt
-}
-
-// ParseOverlayArchive parses an archive containing Go files and their
-// contents. The result is intended to be used with OverlayContext.
-//
-//
-// Archive format
-//
-// The archive consists of a series of files. Each file consists of a
-// name, a decimal file size and the file contents, separated by
-// newlines. No newline follows after the file contents.
-func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
- overlay := make(map[string][]byte)
- r := bufio.NewReader(archive)
- for {
- // Read file name.
- filename, err := r.ReadString('\n')
- if err != nil {
- if err == io.EOF {
- break // OK
- }
- return nil, fmt.Errorf("reading archive file name: %v", err)
- }
- filename = filepath.Clean(strings.TrimSpace(filename))
-
- // Read file size.
- sz, err := r.ReadString('\n')
- if err != nil {
- return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err)
- }
- sz = strings.TrimSpace(sz)
- size, err := strconv.ParseUint(sz, 10, 32)
- if err != nil {
- return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err)
- }
-
- // Read file content.
- content := make([]byte, size)
- if _, err := io.ReadFull(r, content); err != nil {
- return nil, fmt.Errorf("reading archive file %s: %v", filename, err)
- }
- overlay[filename] = content
- }
-
- return overlay, nil
-}
diff --git a/vendor/golang.org/x/tools/go/buildutil/tags.go b/vendor/golang.org/x/tools/go/buildutil/tags.go
deleted file mode 100644
index 486606f3768..00000000000
--- a/vendor/golang.org/x/tools/go/buildutil/tags.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package buildutil
-
-// This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go.
-
-import "fmt"
-
-const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
- "For more information about build tags, see the description of " +
- "build constraints in the documentation for the go/build package"
-
-// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
-// a flag value in the same manner as go build's -tags flag and
-// populates a []string slice.
-//
-// See $GOROOT/src/go/build/doc.go for description of build tags.
-// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
-//
-// Example:
-// flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
-type TagsFlag []string
-
-func (v *TagsFlag) Set(s string) error {
- var err error
- *v, err = splitQuotedFields(s)
- if *v == nil {
- *v = []string{}
- }
- return err
-}
-
-func (v *TagsFlag) Get() interface{} { return *v }
-
-func splitQuotedFields(s string) ([]string, error) {
- // Split fields allowing '' or "" around elements.
- // Quotes further inside the string do not count.
- var f []string
- for len(s) > 0 {
- for len(s) > 0 && isSpaceByte(s[0]) {
- s = s[1:]
- }
- if len(s) == 0 {
- break
- }
- // Accepted quoted string. No unescaping inside.
- if s[0] == '"' || s[0] == '\'' {
- quote := s[0]
- s = s[1:]
- i := 0
- for i < len(s) && s[i] != quote {
- i++
- }
- if i >= len(s) {
- return nil, fmt.Errorf("unterminated %c string", quote)
- }
- f = append(f, s[:i])
- s = s[i+1:]
- continue
- }
- i := 0
- for i < len(s) && !isSpaceByte(s[i]) {
- i++
- }
- f = append(f, s[:i])
- s = s[i:]
- }
- return f, nil
-}
-
-func (v *TagsFlag) String() string {
- return ""
-}
-
-func isSpaceByte(c byte) bool {
- return c == ' ' || c == '\t' || c == '\n' || c == '\r'
-}
diff --git a/vendor/golang.org/x/tools/go/buildutil/util.go b/vendor/golang.org/x/tools/go/buildutil/util.go
deleted file mode 100644
index fc923d7a702..00000000000
--- a/vendor/golang.org/x/tools/go/buildutil/util.go
+++ /dev/null
@@ -1,212 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package buildutil
-
-import (
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "io"
- "io/ioutil"
- "os"
- "path"
- "path/filepath"
- "strings"
-)
-
-// ParseFile behaves like parser.ParseFile,
-// but uses the build context's file system interface, if any.
-//
-// If file is not absolute (as defined by IsAbsPath), the (dir, file)
-// components are joined using JoinPath; dir must be absolute.
-//
-// The displayPath function, if provided, is used to transform the
-// filename that will be attached to the ASTs.
-//
-// TODO(adonovan): call this from go/loader.parseFiles when the tree thaws.
-//
-func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) {
- if !IsAbsPath(ctxt, file) {
- file = JoinPath(ctxt, dir, file)
- }
- rd, err := OpenFile(ctxt, file)
- if err != nil {
- return nil, err
- }
- defer rd.Close() // ignore error
- if displayPath != nil {
- file = displayPath(file)
- }
- return parser.ParseFile(fset, file, rd, mode)
-}
-
-// ContainingPackage returns the package containing filename.
-//
-// If filename is not absolute, it is interpreted relative to working directory dir.
-// All I/O is via the build context's file system interface, if any.
-//
-// The '...Files []string' fields of the resulting build.Package are not
-// populated (build.FindOnly mode).
-//
-func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) {
- if !IsAbsPath(ctxt, filename) {
- filename = JoinPath(ctxt, dir, filename)
- }
-
- // We must not assume the file tree uses
- // "/" always,
- // `\` always,
- // or os.PathSeparator (which varies by platform),
- // but to make any progress, we are forced to assume that
- // paths will not use `\` unless the PathSeparator
- // is also `\`, thus we can rely on filepath.ToSlash for some sanity.
-
- dirSlash := path.Dir(filepath.ToSlash(filename)) + "/"
-
- // We assume that no source root (GOPATH[i] or GOROOT) contains any other.
- for _, srcdir := range ctxt.SrcDirs() {
- srcdirSlash := filepath.ToSlash(srcdir) + "/"
- if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok {
- return ctxt.Import(importPath, dir, build.FindOnly)
- }
- }
-
- return nil, fmt.Errorf("can't find package containing %s", filename)
-}
-
-// -- Effective methods of file system interface -------------------------
-
-// (go/build.Context defines these as methods, but does not export them.)
-
-// hasSubdir calls ctxt.HasSubdir (if not nil) or else uses
-// the local file system to answer the question.
-func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
- if f := ctxt.HasSubdir; f != nil {
- return f(root, dir)
- }
-
- // Try using paths we received.
- if rel, ok = hasSubdir(root, dir); ok {
- return
- }
-
- // Try expanding symlinks and comparing
- // expanded against unexpanded and
- // expanded against expanded.
- rootSym, _ := filepath.EvalSymlinks(root)
- dirSym, _ := filepath.EvalSymlinks(dir)
-
- if rel, ok = hasSubdir(rootSym, dir); ok {
- return
- }
- if rel, ok = hasSubdir(root, dirSym); ok {
- return
- }
- return hasSubdir(rootSym, dirSym)
-}
-
-func hasSubdir(root, dir string) (rel string, ok bool) {
- const sep = string(filepath.Separator)
- root = filepath.Clean(root)
- if !strings.HasSuffix(root, sep) {
- root += sep
- }
-
- dir = filepath.Clean(dir)
- if !strings.HasPrefix(dir, root) {
- return "", false
- }
-
- return filepath.ToSlash(dir[len(root):]), true
-}
-
-// FileExists returns true if the specified file exists,
-// using the build context's file system interface.
-func FileExists(ctxt *build.Context, path string) bool {
- if ctxt.OpenFile != nil {
- r, err := ctxt.OpenFile(path)
- if err != nil {
- return false
- }
- r.Close() // ignore error
- return true
- }
- _, err := os.Stat(path)
- return err == nil
-}
-
-// OpenFile behaves like os.Open,
-// but uses the build context's file system interface, if any.
-func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) {
- if ctxt.OpenFile != nil {
- return ctxt.OpenFile(path)
- }
- return os.Open(path)
-}
-
-// IsAbsPath behaves like filepath.IsAbs,
-// but uses the build context's file system interface, if any.
-func IsAbsPath(ctxt *build.Context, path string) bool {
- if ctxt.IsAbsPath != nil {
- return ctxt.IsAbsPath(path)
- }
- return filepath.IsAbs(path)
-}
-
-// JoinPath behaves like filepath.Join,
-// but uses the build context's file system interface, if any.
-func JoinPath(ctxt *build.Context, path ...string) string {
- if ctxt.JoinPath != nil {
- return ctxt.JoinPath(path...)
- }
- return filepath.Join(path...)
-}
-
-// IsDir behaves like os.Stat plus IsDir,
-// but uses the build context's file system interface, if any.
-func IsDir(ctxt *build.Context, path string) bool {
- if ctxt.IsDir != nil {
- return ctxt.IsDir(path)
- }
- fi, err := os.Stat(path)
- return err == nil && fi.IsDir()
-}
-
-// ReadDir behaves like ioutil.ReadDir,
-// but uses the build context's file system interface, if any.
-func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) {
- if ctxt.ReadDir != nil {
- return ctxt.ReadDir(path)
- }
- return ioutil.ReadDir(path)
-}
-
-// SplitPathList behaves like filepath.SplitList,
-// but uses the build context's file system interface, if any.
-func SplitPathList(ctxt *build.Context, s string) []string {
- if ctxt.SplitPathList != nil {
- return ctxt.SplitPathList(s)
- }
- return filepath.SplitList(s)
-}
-
-// sameFile returns true if x and y have the same basename and denote
-// the same file.
-//
-func sameFile(x, y string) bool {
- if path.Clean(x) == path.Clean(y) {
- return true
- }
- if filepath.Base(x) == filepath.Base(y) { // (optimisation)
- if xi, err := os.Stat(x); err == nil {
- if yi, err := os.Stat(y); err == nil {
- return os.SameFile(xi, yi)
- }
- }
- }
- return false
-}
diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo.go
deleted file mode 100644
index 5db8b309676..00000000000
--- a/vendor/golang.org/x/tools/go/internal/cgo/cgo.go
+++ /dev/null
@@ -1,220 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cgo handles cgo preprocessing of files containing `import "C"`.
-//
-// DESIGN
-//
-// The approach taken is to run the cgo processor on the package's
-// CgoFiles and parse the output, faking the filenames of the
-// resulting ASTs so that the synthetic file containing the C types is
-// called "C" (e.g. "~/go/src/net/C") and the preprocessed files
-// have their original names (e.g. "~/go/src/net/cgo_unix.go"),
-// not the names of the actual temporary files.
-//
-// The advantage of this approach is its fidelity to 'go build'. The
-// downside is that the token.Position.Offset for each AST node is
-// incorrect, being an offset within the temporary file. Line numbers
-// should still be correct because of the //line comments.
-//
-// The logic of this file is mostly plundered from the 'go build'
-// tool, which also invokes the cgo preprocessor.
-//
-//
-// REJECTED ALTERNATIVE
-//
-// An alternative approach that we explored is to extend go/types'
-// Importer mechanism to provide the identity of the importing package
-// so that each time `import "C"` appears it resolves to a different
-// synthetic package containing just the objects needed in that case.
-// The loader would invoke cgo but parse only the cgo_types.go file
-// defining the package-level objects, discarding the other files
-// resulting from preprocessing.
-//
-// The benefit of this approach would have been that source-level
-// syntax information would correspond exactly to the original cgo
-// file, with no preprocessing involved, making source tools like
-// godoc, guru, and eg happy. However, the approach was rejected
-// due to the additional complexity it would impose on go/types. (It
-// made for a beautiful demo, though.)
-//
-// cgo files, despite their *.go extension, are not legal Go source
-// files per the specification since they may refer to unexported
-// members of package "C" such as C.int. Also, a function such as
-// C.getpwent has in effect two types, one matching its C type and one
-// which additionally returns (errno C.int). The cgo preprocessor
-// uses name mangling to distinguish these two functions in the
-// processed code, but go/types would need to duplicate this logic in
-// its handling of function calls, analogous to the treatment of map
-// lookups in which y=m[k] and y,ok=m[k] are both legal.
-
-package cgo
-
-import (
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "io/ioutil"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "regexp"
- "strings"
-)
-
-// ProcessFiles invokes the cgo preprocessor on bp.CgoFiles, parses
-// the output and returns the resulting ASTs.
-//
-func ProcessFiles(bp *build.Package, fset *token.FileSet, DisplayPath func(path string) string, mode parser.Mode) ([]*ast.File, error) {
- tmpdir, err := ioutil.TempDir("", strings.Replace(bp.ImportPath, "/", "_", -1)+"_C")
- if err != nil {
- return nil, err
- }
- defer os.RemoveAll(tmpdir)
-
- pkgdir := bp.Dir
- if DisplayPath != nil {
- pkgdir = DisplayPath(pkgdir)
- }
-
- cgoFiles, cgoDisplayFiles, err := Run(bp, pkgdir, tmpdir, false)
- if err != nil {
- return nil, err
- }
- var files []*ast.File
- for i := range cgoFiles {
- rd, err := os.Open(cgoFiles[i])
- if err != nil {
- return nil, err
- }
- display := filepath.Join(bp.Dir, cgoDisplayFiles[i])
- f, err := parser.ParseFile(fset, display, rd, mode)
- rd.Close()
- if err != nil {
- return nil, err
- }
- files = append(files, f)
- }
- return files, nil
-}
-
-var cgoRe = regexp.MustCompile(`[/\\:]`)
-
-// Run invokes the cgo preprocessor on bp.CgoFiles and returns two
-// lists of files: the resulting processed files (in temporary
-// directory tmpdir) and the corresponding names of the unprocessed files.
-//
-// Run is adapted from (*builder).cgo in
-// $GOROOT/src/cmd/go/build.go, but these features are unsupported:
-// Objective C, CGOPKGPATH, CGO_FLAGS.
-//
-// If useabs is set to true, absolute paths of the bp.CgoFiles will be passed in
-// to the cgo preprocessor. This in turn will set the // line comments
-// referring to those files to use absolute paths. This is needed for
-// go/packages using the legacy go list support so it is able to find
-// the original files.
-func Run(bp *build.Package, pkgdir, tmpdir string, useabs bool) (files, displayFiles []string, err error) {
- cgoCPPFLAGS, _, _, _ := cflags(bp, true)
- _, cgoexeCFLAGS, _, _ := cflags(bp, false)
-
- if len(bp.CgoPkgConfig) > 0 {
- pcCFLAGS, err := pkgConfigFlags(bp)
- if err != nil {
- return nil, nil, err
- }
- cgoCPPFLAGS = append(cgoCPPFLAGS, pcCFLAGS...)
- }
-
- // Allows including _cgo_export.h from .[ch] files in the package.
- cgoCPPFLAGS = append(cgoCPPFLAGS, "-I", tmpdir)
-
- // _cgo_gotypes.go (displayed "C") contains the type definitions.
- files = append(files, filepath.Join(tmpdir, "_cgo_gotypes.go"))
- displayFiles = append(displayFiles, "C")
- for _, fn := range bp.CgoFiles {
- // "foo.cgo1.go" (displayed "foo.go") is the processed Go source.
- f := cgoRe.ReplaceAllString(fn[:len(fn)-len("go")], "_")
- files = append(files, filepath.Join(tmpdir, f+"cgo1.go"))
- displayFiles = append(displayFiles, fn)
- }
-
- var cgoflags []string
- if bp.Goroot && bp.ImportPath == "runtime/cgo" {
- cgoflags = append(cgoflags, "-import_runtime_cgo=false")
- }
- if bp.Goroot && bp.ImportPath == "runtime/race" || bp.ImportPath == "runtime/cgo" {
- cgoflags = append(cgoflags, "-import_syscall=false")
- }
-
- var cgoFiles []string = bp.CgoFiles
- if useabs {
- cgoFiles = make([]string, len(bp.CgoFiles))
- for i := range cgoFiles {
- cgoFiles[i] = filepath.Join(pkgdir, bp.CgoFiles[i])
- }
- }
-
- args := stringList(
- "go", "tool", "cgo", "-objdir", tmpdir, cgoflags, "--",
- cgoCPPFLAGS, cgoexeCFLAGS, cgoFiles,
- )
- if false {
- log.Printf("Running cgo for package %q: %s (dir=%s)", bp.ImportPath, args, pkgdir)
- }
- cmd := exec.Command(args[0], args[1:]...)
- cmd.Dir = pkgdir
- cmd.Stdout = os.Stderr
- cmd.Stderr = os.Stderr
- if err := cmd.Run(); err != nil {
- return nil, nil, fmt.Errorf("cgo failed: %s: %s", args, err)
- }
-
- return files, displayFiles, nil
-}
-
-// -- unmodified from 'go build' ---------------------------------------
-
-// Return the flags to use when invoking the C or C++ compilers, or cgo.
-func cflags(p *build.Package, def bool) (cppflags, cflags, cxxflags, ldflags []string) {
- var defaults string
- if def {
- defaults = "-g -O2"
- }
-
- cppflags = stringList(envList("CGO_CPPFLAGS", ""), p.CgoCPPFLAGS)
- cflags = stringList(envList("CGO_CFLAGS", defaults), p.CgoCFLAGS)
- cxxflags = stringList(envList("CGO_CXXFLAGS", defaults), p.CgoCXXFLAGS)
- ldflags = stringList(envList("CGO_LDFLAGS", defaults), p.CgoLDFLAGS)
- return
-}
-
-// envList returns the value of the given environment variable broken
-// into fields, using the default value when the variable is empty.
-func envList(key, def string) []string {
- v := os.Getenv(key)
- if v == "" {
- v = def
- }
- return strings.Fields(v)
-}
-
-// stringList's arguments should be a sequence of string or []string values.
-// stringList flattens them into a single []string.
-func stringList(args ...interface{}) []string {
- var x []string
- for _, arg := range args {
- switch arg := arg.(type) {
- case []string:
- x = append(x, arg...)
- case string:
- x = append(x, arg)
- default:
- panic("stringList: invalid argument")
- }
- }
- return x
-}
diff --git a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go b/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
deleted file mode 100644
index b5bb95a63e5..00000000000
--- a/vendor/golang.org/x/tools/go/internal/cgo/cgo_pkgconfig.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cgo
-
-import (
- "errors"
- "fmt"
- "go/build"
- "os/exec"
- "strings"
-)
-
-// pkgConfig runs pkg-config with the specified arguments and returns the flags it prints.
-func pkgConfig(mode string, pkgs []string) (flags []string, err error) {
- cmd := exec.Command("pkg-config", append([]string{mode}, pkgs...)...)
- out, err := cmd.CombinedOutput()
- if err != nil {
- s := fmt.Sprintf("%s failed: %v", strings.Join(cmd.Args, " "), err)
- if len(out) > 0 {
- s = fmt.Sprintf("%s: %s", s, out)
- }
- return nil, errors.New(s)
- }
- if len(out) > 0 {
- flags = strings.Fields(string(out))
- }
- return
-}
-
-// pkgConfigFlags calls pkg-config if needed and returns the cflags
-// needed to build the package.
-func pkgConfigFlags(p *build.Package) (cflags []string, err error) {
- if len(p.CgoPkgConfig) == 0 {
- return nil, nil
- }
- return pkgConfig("--cflags", p.CgoPkgConfig)
-}
diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
deleted file mode 100644
index dc6177c122d..00000000000
--- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go
+++ /dev/null
@@ -1,117 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package packagesdriver fetches type sizes for go/packages and go/analysis.
-package packagesdriver
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "go/types"
- "os/exec"
- "strings"
-
- "golang.org/x/tools/internal/gocommand"
-)
-
-var debug = false
-
-func GetSizes(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) {
- // TODO(matloob): Clean this up. This code is mostly a copy of packages.findExternalDriver.
- const toolPrefix = "GOPACKAGESDRIVER="
- tool := ""
- for _, env := range env {
- if val := strings.TrimPrefix(env, toolPrefix); val != env {
- tool = val
- }
- }
-
- if tool == "" {
- var err error
- tool, err = exec.LookPath("gopackagesdriver")
- if err != nil {
- // We did not find the driver, so use "go list".
- tool = "off"
- }
- }
-
- if tool == "off" {
- return GetSizesGolist(ctx, buildFlags, env, gocmdRunner, dir)
- }
-
- req, err := json.Marshal(struct {
- Command string `json:"command"`
- Env []string `json:"env"`
- BuildFlags []string `json:"build_flags"`
- }{
- Command: "sizes",
- Env: env,
- BuildFlags: buildFlags,
- })
- if err != nil {
- return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
- }
-
- buf := new(bytes.Buffer)
- cmd := exec.CommandContext(ctx, tool)
- cmd.Dir = dir
- cmd.Env = env
- cmd.Stdin = bytes.NewReader(req)
- cmd.Stdout = buf
- cmd.Stderr = new(bytes.Buffer)
- if err := cmd.Run(); err != nil {
- return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
- }
- var response struct {
- // Sizes, if not nil, is the types.Sizes to use when type checking.
- Sizes *types.StdSizes
- }
- if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
- return nil, err
- }
- return response.Sizes, nil
-}
-
-func GetSizesGolist(ctx context.Context, buildFlags, env []string, gocmdRunner *gocommand.Runner, dir string) (types.Sizes, error) {
- inv := gocommand.Invocation{
- Verb: "list",
- Args: []string{"-f", "{{context.GOARCH}} {{context.Compiler}}", "--", "unsafe"},
- Env: env,
- BuildFlags: buildFlags,
- WorkingDir: dir,
- }
- stdout, stderr, friendlyErr, rawErr := gocmdRunner.RunRaw(ctx, inv)
- var goarch, compiler string
- if rawErr != nil {
- if strings.Contains(rawErr.Error(), "cannot find main module") {
- // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc.
- // TODO(matloob): Is this a problem in practice?
- inv := gocommand.Invocation{
- Verb: "env",
- Args: []string{"GOARCH"},
- Env: env,
- WorkingDir: dir,
- }
- envout, enverr := gocmdRunner.Run(ctx, inv)
- if enverr != nil {
- return nil, enverr
- }
- goarch = strings.TrimSpace(envout.String())
- compiler = "gc"
- } else {
- return nil, friendlyErr
- }
- } else {
- fields := strings.Fields(stdout.String())
- if len(fields) < 2 {
- return nil, fmt.Errorf("could not parse GOARCH and Go compiler in format \" \":\nstdout: <<%s>>\nstderr: <<%s>>",
- stdout.String(), stderr.String())
- }
- goarch = fields[0]
- compiler = fields[1]
- }
- return types.SizesFor(compiler, goarch), nil
-}
diff --git a/vendor/golang.org/x/tools/go/loader/doc.go b/vendor/golang.org/x/tools/go/loader/doc.go
deleted file mode 100644
index c5aa31c1a02..00000000000
--- a/vendor/golang.org/x/tools/go/loader/doc.go
+++ /dev/null
@@ -1,204 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package loader loads a complete Go program from source code, parsing
-// and type-checking the initial packages plus their transitive closure
-// of dependencies. The ASTs and the derived facts are retained for
-// later use.
-//
-// Deprecated: This is an older API and does not have support
-// for modules. Use golang.org/x/tools/go/packages instead.
-//
-// The package defines two primary types: Config, which specifies a
-// set of initial packages to load and various other options; and
-// Program, which is the result of successfully loading the packages
-// specified by a configuration.
-//
-// The configuration can be set directly, but *Config provides various
-// convenience methods to simplify the common cases, each of which can
-// be called any number of times. Finally, these are followed by a
-// call to Load() to actually load and type-check the program.
-//
-// var conf loader.Config
-//
-// // Use the command-line arguments to specify
-// // a set of initial packages to load from source.
-// // See FromArgsUsage for help.
-// rest, err := conf.FromArgs(os.Args[1:], wantTests)
-//
-// // Parse the specified files and create an ad hoc package with path "foo".
-// // All files must have the same 'package' declaration.
-// conf.CreateFromFilenames("foo", "foo.go", "bar.go")
-//
-// // Create an ad hoc package with path "foo" from
-// // the specified already-parsed files.
-// // All ASTs must have the same 'package' declaration.
-// conf.CreateFromFiles("foo", parsedFiles)
-//
-// // Add "runtime" to the set of packages to be loaded.
-// conf.Import("runtime")
-//
-// // Adds "fmt" and "fmt_test" to the set of packages
-// // to be loaded. "fmt" will include *_test.go files.
-// conf.ImportWithTests("fmt")
-//
-// // Finally, load all the packages specified by the configuration.
-// prog, err := conf.Load()
-//
-// See examples_test.go for examples of API usage.
-//
-//
-// CONCEPTS AND TERMINOLOGY
-//
-// The WORKSPACE is the set of packages accessible to the loader. The
-// workspace is defined by Config.Build, a *build.Context. The
-// default context treats subdirectories of $GOROOT and $GOPATH as
-// packages, but this behavior may be overridden.
-//
-// An AD HOC package is one specified as a set of source files on the
-// command line. In the simplest case, it may consist of a single file
-// such as $GOROOT/src/net/http/triv.go.
-//
-// EXTERNAL TEST packages are those comprised of a set of *_test.go
-// files all with the same 'package foo_test' declaration, all in the
-// same directory. (go/build.Package calls these files XTestFiles.)
-//
-// An IMPORTABLE package is one that can be referred to by some import
-// spec. Every importable package is uniquely identified by its
-// PACKAGE PATH or just PATH, a string such as "fmt", "encoding/json",
-// or "cmd/vendor/golang.org/x/arch/x86/x86asm". A package path
-// typically denotes a subdirectory of the workspace.
-//
-// An import declaration uses an IMPORT PATH to refer to a package.
-// Most import declarations use the package path as the import path.
-//
-// Due to VENDORING (https://golang.org/s/go15vendor), the
-// interpretation of an import path may depend on the directory in which
-// it appears. To resolve an import path to a package path, go/build
-// must search the enclosing directories for a subdirectory named
-// "vendor".
-//
-// ad hoc packages and external test packages are NON-IMPORTABLE. The
-// path of an ad hoc package is inferred from the package
-// declarations of its files and is therefore not a unique package key.
-// For example, Config.CreatePkgs may specify two initial ad hoc
-// packages, both with path "main".
-//
-// An AUGMENTED package is an importable package P plus all the
-// *_test.go files with same 'package foo' declaration as P.
-// (go/build.Package calls these files TestFiles.)
-//
-// The INITIAL packages are those specified in the configuration. A
-// DEPENDENCY is a package loaded to satisfy an import in an initial
-// package or another dependency.
-//
-package loader
-
-// IMPLEMENTATION NOTES
-//
-// 'go test', in-package test files, and import cycles
-// ---------------------------------------------------
-//
-// An external test package may depend upon members of the augmented
-// package that are not in the unaugmented package, such as functions
-// that expose internals. (See bufio/export_test.go for an example.)
-// So, the loader must ensure that for each external test package
-// it loads, it also augments the corresponding non-test package.
-//
-// The import graph over n unaugmented packages must be acyclic; the
-// import graph over n-1 unaugmented packages plus one augmented
-// package must also be acyclic. ('go test' relies on this.) But the
-// import graph over n augmented packages may contain cycles.
-//
-// First, all the (unaugmented) non-test packages and their
-// dependencies are imported in the usual way; the loader reports an
-// error if it detects an import cycle.
-//
-// Then, each package P for which testing is desired is augmented by
-// the list P' of its in-package test files, by calling
-// (*types.Checker).Files. This arrangement ensures that P' may
-// reference definitions within P, but P may not reference definitions
-// within P'. Furthermore, P' may import any other package, including
-// ones that depend upon P, without an import cycle error.
-//
-// Consider two packages A and B, both of which have lists of
-// in-package test files we'll call A' and B', and which have the
-// following import graph edges:
-// B imports A
-// B' imports A
-// A' imports B
-// This last edge would be expected to create an error were it not
-// for the special type-checking discipline above.
-// Cycles of size greater than two are possible. For example:
-// compress/bzip2/bzip2_test.go (package bzip2) imports "io/ioutil"
-// io/ioutil/tempfile_test.go (package ioutil) imports "regexp"
-// regexp/exec_test.go (package regexp) imports "compress/bzip2"
-//
-//
-// Concurrency
-// -----------
-//
-// Let us define the import dependency graph as follows. Each node is a
-// list of files passed to (Checker).Files at once. Many of these lists
-// are the production code of an importable Go package, so those nodes
-// are labelled by the package's path. The remaining nodes are
-// ad hoc packages and lists of in-package *_test.go files that augment
-// an importable package; those nodes have no label.
-//
-// The edges of the graph represent import statements appearing within a
-// file. An edge connects a node (a list of files) to the node it
-// imports, which is importable and thus always labelled.
-//
-// Loading is controlled by this dependency graph.
-//
-// To reduce I/O latency, we start loading a package's dependencies
-// asynchronously as soon as we've parsed its files and enumerated its
-// imports (scanImports). This performs a preorder traversal of the
-// import dependency graph.
-//
-// To exploit hardware parallelism, we type-check unrelated packages in
-// parallel, where "unrelated" means not ordered by the partial order of
-// the import dependency graph.
-//
-// We use a concurrency-safe non-blocking cache (importer.imported) to
-// record the results of type-checking, whether success or failure. An
-// entry is created in this cache by startLoad the first time the
-// package is imported. The first goroutine to request an entry becomes
-// responsible for completing the task and broadcasting completion to
-// subsequent requestors, which block until then.
-//
-// Type checking occurs in (parallel) postorder: we cannot type-check a
-// set of files until we have loaded and type-checked all of their
-// immediate dependencies (and thus all of their transitive
-// dependencies). If the input were guaranteed free of import cycles,
-// this would be trivial: we could simply wait for completion of the
-// dependencies and then invoke the typechecker.
-//
-// But as we saw in the 'go test' section above, some cycles in the
-// import graph over packages are actually legal, so long as the
-// cycle-forming edge originates in the in-package test files that
-// augment the package. This explains why the nodes of the import
-// dependency graph are not packages, but lists of files: the unlabelled
-// nodes avoid the cycles. Consider packages A and B where B imports A
-// and A's in-package tests AT import B. The naively constructed import
-// graph over packages would contain a cycle (A+AT) --> B --> (A+AT) but
-// the graph over lists of files is AT --> B --> A, where AT is an
-// unlabelled node.
-//
-// Awaiting completion of the dependencies in a cyclic graph would
-// deadlock, so we must materialize the import dependency graph (as
-// importer.graph) and check whether each import edge forms a cycle. If
-// x imports y, and the graph already contains a path from y to x, then
-// there is an import cycle, in which case the processing of x must not
-// wait for the completion of processing of y.
-//
-// When the type-checker makes a callback (doImport) to the loader for a
-// given import edge, there are two possible cases. In the normal case,
-// the dependency has already been completely type-checked; doImport
-// does a cache lookup and returns it. In the cyclic case, the entry in
-// the cache is still necessarily incomplete, indicating a cycle. We
-// perform the cycle check again to obtain the error message, and return
-// the error.
-//
-// The result of using concurrency is about a 2.5x speedup for stdlib_test.
diff --git a/vendor/golang.org/x/tools/go/loader/loader.go b/vendor/golang.org/x/tools/go/loader/loader.go
deleted file mode 100644
index bc12ca33d1a..00000000000
--- a/vendor/golang.org/x/tools/go/loader/loader.go
+++ /dev/null
@@ -1,1086 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package loader
-
-// See doc.go for package documentation and implementation notes.
-
-import (
- "errors"
- "fmt"
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "go/types"
- "os"
- "path/filepath"
- "sort"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/go/internal/cgo"
-)
-
-var ignoreVendor build.ImportMode
-
-const trace = false // show timing info for type-checking
-
-// Config specifies the configuration for loading a whole program from
-// Go source code.
-// The zero value for Config is a ready-to-use default configuration.
-type Config struct {
- // Fset is the file set for the parser to use when loading the
- // program. If nil, it may be lazily initialized by any
- // method of Config.
- Fset *token.FileSet
-
- // ParserMode specifies the mode to be used by the parser when
- // loading source packages.
- ParserMode parser.Mode
-
- // TypeChecker contains options relating to the type checker.
- //
- // The supplied IgnoreFuncBodies is not used; the effective
- // value comes from the TypeCheckFuncBodies func below.
- // The supplied Import function is not used either.
- TypeChecker types.Config
-
- // TypeCheckFuncBodies is a predicate over package paths.
- // A package for which the predicate is false will
- // have its package-level declarations type checked, but not
- // its function bodies; this can be used to quickly load
- // dependencies from source. If nil, all func bodies are type
- // checked.
- TypeCheckFuncBodies func(path string) bool
-
- // If Build is non-nil, it is used to locate source packages.
- // Otherwise &build.Default is used.
- //
- // By default, cgo is invoked to preprocess Go files that
- // import the fake package "C". This behaviour can be
- // disabled by setting CGO_ENABLED=0 in the environment prior
- // to startup, or by setting Build.CgoEnabled=false.
- Build *build.Context
-
- // The current directory, used for resolving relative package
- // references such as "./go/loader". If empty, os.Getwd will be
- // used instead.
- Cwd string
-
- // If DisplayPath is non-nil, it is used to transform each
- // file name obtained from Build.Import(). This can be used
- // to prevent a virtualized build.Config's file names from
- // leaking into the user interface.
- DisplayPath func(path string) string
-
- // If AllowErrors is true, Load will return a Program even
- // if some of the its packages contained I/O, parser or type
- // errors; such errors are accessible via PackageInfo.Errors. If
- // false, Load will fail if any package had an error.
- AllowErrors bool
-
- // CreatePkgs specifies a list of non-importable initial
- // packages to create. The resulting packages will appear in
- // the corresponding elements of the Program.Created slice.
- CreatePkgs []PkgSpec
-
- // ImportPkgs specifies a set of initial packages to load.
- // The map keys are package paths.
- //
- // The map value indicates whether to load tests. If true, Load
- // will add and type-check two lists of files to the package:
- // non-test files followed by in-package *_test.go files. In
- // addition, it will append the external test package (if any)
- // to Program.Created.
- ImportPkgs map[string]bool
-
- // FindPackage is called during Load to create the build.Package
- // for a given import path from a given directory.
- // If FindPackage is nil, (*build.Context).Import is used.
- // A client may use this hook to adapt to a proprietary build
- // system that does not follow the "go build" layout
- // conventions, for example.
- //
- // It must be safe to call concurrently from multiple goroutines.
- FindPackage func(ctxt *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error)
-
- // AfterTypeCheck is called immediately after a list of files
- // has been type-checked and appended to info.Files.
- //
- // This optional hook function is the earliest opportunity for
- // the client to observe the output of the type checker,
- // which may be useful to reduce analysis latency when loading
- // a large program.
- //
- // The function is permitted to modify info.Info, for instance
- // to clear data structures that are no longer needed, which can
- // dramatically reduce peak memory consumption.
- //
- // The function may be called twice for the same PackageInfo:
- // once for the files of the package and again for the
- // in-package test files.
- //
- // It must be safe to call concurrently from multiple goroutines.
- AfterTypeCheck func(info *PackageInfo, files []*ast.File)
-}
-
-// A PkgSpec specifies a non-importable package to be created by Load.
-// Files are processed first, but typically only one of Files and
-// Filenames is provided. The path needn't be globally unique.
-//
-// For vendoring purposes, the package's directory is the one that
-// contains the first file.
-type PkgSpec struct {
- Path string // package path ("" => use package declaration)
- Files []*ast.File // ASTs of already-parsed files
- Filenames []string // names of files to be parsed
-}
-
-// A Program is a Go program loaded from source as specified by a Config.
-type Program struct {
- Fset *token.FileSet // the file set for this program
-
- // Created[i] contains the initial package whose ASTs or
- // filenames were supplied by Config.CreatePkgs[i], followed by
- // the external test package, if any, of each package in
- // Config.ImportPkgs ordered by ImportPath.
- //
- // NOTE: these files must not import "C". Cgo preprocessing is
- // only performed on imported packages, not ad hoc packages.
- //
- // TODO(adonovan): we need to copy and adapt the logic of
- // goFilesPackage (from $GOROOT/src/cmd/go/build.go) and make
- // Config.Import and Config.Create methods return the same kind
- // of entity, essentially a build.Package.
- // Perhaps we can even reuse that type directly.
- Created []*PackageInfo
-
- // Imported contains the initially imported packages,
- // as specified by Config.ImportPkgs.
- Imported map[string]*PackageInfo
-
- // AllPackages contains the PackageInfo of every package
- // encountered by Load: all initial packages and all
- // dependencies, including incomplete ones.
- AllPackages map[*types.Package]*PackageInfo
-
- // importMap is the canonical mapping of package paths to
- // packages. It contains all Imported initial packages, but not
- // Created ones, and all imported dependencies.
- importMap map[string]*types.Package
-}
-
-// PackageInfo holds the ASTs and facts derived by the type-checker
-// for a single package.
-//
-// Not mutated once exposed via the API.
-//
-type PackageInfo struct {
- Pkg *types.Package
- Importable bool // true if 'import "Pkg.Path()"' would resolve to this
- TransitivelyErrorFree bool // true if Pkg and all its dependencies are free of errors
- Files []*ast.File // syntax trees for the package's files
- Errors []error // non-nil if the package had errors
- types.Info // type-checker deductions.
- dir string // package directory
-
- checker *types.Checker // transient type-checker state
- errorFunc func(error)
-}
-
-func (info *PackageInfo) String() string { return info.Pkg.Path() }
-
-func (info *PackageInfo) appendError(err error) {
- if info.errorFunc != nil {
- info.errorFunc(err)
- } else {
- fmt.Fprintln(os.Stderr, err)
- }
- info.Errors = append(info.Errors, err)
-}
-
-func (conf *Config) fset() *token.FileSet {
- if conf.Fset == nil {
- conf.Fset = token.NewFileSet()
- }
- return conf.Fset
-}
-
-// ParseFile is a convenience function (intended for testing) that invokes
-// the parser using the Config's FileSet, which is initialized if nil.
-//
-// src specifies the parser input as a string, []byte, or io.Reader, and
-// filename is its apparent name. If src is nil, the contents of
-// filename are read from the file system.
-//
-func (conf *Config) ParseFile(filename string, src interface{}) (*ast.File, error) {
- // TODO(adonovan): use conf.build() etc like parseFiles does.
- return parser.ParseFile(conf.fset(), filename, src, conf.ParserMode)
-}
-
-// FromArgsUsage is a partial usage message that applications calling
-// FromArgs may wish to include in their -help output.
-const FromArgsUsage = `
- is a list of arguments denoting a set of initial packages.
-It may take one of two forms:
-
-1. A list of *.go source files.
-
- All of the specified files are loaded, parsed and type-checked
- as a single package. All the files must belong to the same directory.
-
-2. A list of import paths, each denoting a package.
-
- The package's directory is found relative to the $GOROOT and
- $GOPATH using similar logic to 'go build', and the *.go files in
- that directory are loaded, parsed and type-checked as a single
- package.
-
- In addition, all *_test.go files in the directory are then loaded
- and parsed. Those files whose package declaration equals that of
- the non-*_test.go files are included in the primary package. Test
- files whose package declaration ends with "_test" are type-checked
- as another package, the 'external' test package, so that a single
- import path may denote two packages. (Whether this behaviour is
- enabled is tool-specific, and may depend on additional flags.)
-
-A '--' argument terminates the list of packages.
-`
-
-// FromArgs interprets args as a set of initial packages to load from
-// source and updates the configuration. It returns the list of
-// unconsumed arguments.
-//
-// It is intended for use in command-line interfaces that require a
-// set of initial packages to be specified; see FromArgsUsage message
-// for details.
-//
-// Only superficial errors are reported at this stage; errors dependent
-// on I/O are detected during Load.
-//
-func (conf *Config) FromArgs(args []string, xtest bool) ([]string, error) {
- var rest []string
- for i, arg := range args {
- if arg == "--" {
- rest = args[i+1:]
- args = args[:i]
- break // consume "--" and return the remaining args
- }
- }
-
- if len(args) > 0 && strings.HasSuffix(args[0], ".go") {
- // Assume args is a list of a *.go files
- // denoting a single ad hoc package.
- for _, arg := range args {
- if !strings.HasSuffix(arg, ".go") {
- return nil, fmt.Errorf("named files must be .go files: %s", arg)
- }
- }
- conf.CreateFromFilenames("", args...)
- } else {
- // Assume args are directories each denoting a
- // package and (perhaps) an external test, iff xtest.
- for _, arg := range args {
- if xtest {
- conf.ImportWithTests(arg)
- } else {
- conf.Import(arg)
- }
- }
- }
-
- return rest, nil
-}
-
-// CreateFromFilenames is a convenience function that adds
-// a conf.CreatePkgs entry to create a package of the specified *.go
-// files.
-//
-func (conf *Config) CreateFromFilenames(path string, filenames ...string) {
- conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Filenames: filenames})
-}
-
-// CreateFromFiles is a convenience function that adds a conf.CreatePkgs
-// entry to create package of the specified path and parsed files.
-//
-func (conf *Config) CreateFromFiles(path string, files ...*ast.File) {
- conf.CreatePkgs = append(conf.CreatePkgs, PkgSpec{Path: path, Files: files})
-}
-
-// ImportWithTests is a convenience function that adds path to
-// ImportPkgs, the set of initial source packages located relative to
-// $GOPATH. The package will be augmented by any *_test.go files in
-// its directory that contain a "package x" (not "package x_test")
-// declaration.
-//
-// In addition, if any *_test.go files contain a "package x_test"
-// declaration, an additional package comprising just those files will
-// be added to CreatePkgs.
-//
-func (conf *Config) ImportWithTests(path string) { conf.addImport(path, true) }
-
-// Import is a convenience function that adds path to ImportPkgs, the
-// set of initial packages that will be imported from source.
-//
-func (conf *Config) Import(path string) { conf.addImport(path, false) }
-
-func (conf *Config) addImport(path string, tests bool) {
- if path == "C" {
- return // ignore; not a real package
- }
- if conf.ImportPkgs == nil {
- conf.ImportPkgs = make(map[string]bool)
- }
- conf.ImportPkgs[path] = conf.ImportPkgs[path] || tests
-}
-
-// PathEnclosingInterval returns the PackageInfo and ast.Node that
-// contain source interval [start, end), and all the node's ancestors
-// up to the AST root. It searches all ast.Files of all packages in prog.
-// exact is defined as for astutil.PathEnclosingInterval.
-//
-// The zero value is returned if not found.
-//
-func (prog *Program) PathEnclosingInterval(start, end token.Pos) (pkg *PackageInfo, path []ast.Node, exact bool) {
- for _, info := range prog.AllPackages {
- for _, f := range info.Files {
- if f.Pos() == token.NoPos {
- // This can happen if the parser saw
- // too many errors and bailed out.
- // (Use parser.AllErrors to prevent that.)
- continue
- }
- if !tokenFileContainsPos(prog.Fset.File(f.Pos()), start) {
- continue
- }
- if path, exact := astutil.PathEnclosingInterval(f, start, end); path != nil {
- return info, path, exact
- }
- }
- }
- return nil, nil, false
-}
-
-// InitialPackages returns a new slice containing the set of initial
-// packages (Created + Imported) in unspecified order.
-//
-func (prog *Program) InitialPackages() []*PackageInfo {
- infos := make([]*PackageInfo, 0, len(prog.Created)+len(prog.Imported))
- infos = append(infos, prog.Created...)
- for _, info := range prog.Imported {
- infos = append(infos, info)
- }
- return infos
-}
-
-// Package returns the ASTs and results of type checking for the
-// specified package.
-func (prog *Program) Package(path string) *PackageInfo {
- if info, ok := prog.AllPackages[prog.importMap[path]]; ok {
- return info
- }
- for _, info := range prog.Created {
- if path == info.Pkg.Path() {
- return info
- }
- }
- return nil
-}
-
-// ---------- Implementation ----------
-
-// importer holds the working state of the algorithm.
-type importer struct {
- conf *Config // the client configuration
- start time.Time // for logging
-
- progMu sync.Mutex // guards prog
- prog *Program // the resulting program
-
- // findpkg is a memoization of FindPackage.
- findpkgMu sync.Mutex // guards findpkg
- findpkg map[findpkgKey]*findpkgValue
-
- importedMu sync.Mutex // guards imported
- imported map[string]*importInfo // all imported packages (incl. failures) by import path
-
- // import dependency graph: graph[x][y] => x imports y
- //
- // Since non-importable packages cannot be cyclic, we ignore
- // their imports, thus we only need the subgraph over importable
- // packages. Nodes are identified by their import paths.
- graphMu sync.Mutex
- graph map[string]map[string]bool
-}
-
-type findpkgKey struct {
- importPath string
- fromDir string
- mode build.ImportMode
-}
-
-type findpkgValue struct {
- ready chan struct{} // closed to broadcast readiness
- bp *build.Package
- err error
-}
-
-// importInfo tracks the success or failure of a single import.
-//
-// Upon completion, exactly one of info and err is non-nil:
-// info on successful creation of a package, err otherwise.
-// A successful package may still contain type errors.
-//
-type importInfo struct {
- path string // import path
- info *PackageInfo // results of typechecking (including errors)
- complete chan struct{} // closed to broadcast that info is set.
-}
-
-// awaitCompletion blocks until ii is complete,
-// i.e. the info field is safe to inspect.
-func (ii *importInfo) awaitCompletion() {
- <-ii.complete // wait for close
-}
-
-// Complete marks ii as complete.
-// Its info and err fields will not be subsequently updated.
-func (ii *importInfo) Complete(info *PackageInfo) {
- if info == nil {
- panic("info == nil")
- }
- ii.info = info
- close(ii.complete)
-}
-
-type importError struct {
- path string // import path
- err error // reason for failure to create a package
-}
-
-// Load creates the initial packages specified by conf.{Create,Import}Pkgs,
-// loading their dependencies packages as needed.
-//
-// On success, Load returns a Program containing a PackageInfo for
-// each package. On failure, it returns an error.
-//
-// If AllowErrors is true, Load will return a Program even if some
-// packages contained I/O, parser or type errors, or if dependencies
-// were missing. (Such errors are accessible via PackageInfo.Errors. If
-// false, Load will fail if any package had an error.
-//
-// It is an error if no packages were loaded.
-//
-func (conf *Config) Load() (*Program, error) {
- // Create a simple default error handler for parse/type errors.
- if conf.TypeChecker.Error == nil {
- conf.TypeChecker.Error = func(e error) { fmt.Fprintln(os.Stderr, e) }
- }
-
- // Set default working directory for relative package references.
- if conf.Cwd == "" {
- var err error
- conf.Cwd, err = os.Getwd()
- if err != nil {
- return nil, err
- }
- }
-
- // Install default FindPackage hook using go/build logic.
- if conf.FindPackage == nil {
- conf.FindPackage = (*build.Context).Import
- }
-
- prog := &Program{
- Fset: conf.fset(),
- Imported: make(map[string]*PackageInfo),
- importMap: make(map[string]*types.Package),
- AllPackages: make(map[*types.Package]*PackageInfo),
- }
-
- imp := importer{
- conf: conf,
- prog: prog,
- findpkg: make(map[findpkgKey]*findpkgValue),
- imported: make(map[string]*importInfo),
- start: time.Now(),
- graph: make(map[string]map[string]bool),
- }
-
- // -- loading proper (concurrent phase) --------------------------------
-
- var errpkgs []string // packages that contained errors
-
- // Load the initially imported packages and their dependencies,
- // in parallel.
- // No vendor check on packages imported from the command line.
- infos, importErrors := imp.importAll("", conf.Cwd, conf.ImportPkgs, ignoreVendor)
- for _, ie := range importErrors {
- conf.TypeChecker.Error(ie.err) // failed to create package
- errpkgs = append(errpkgs, ie.path)
- }
- for _, info := range infos {
- prog.Imported[info.Pkg.Path()] = info
- }
-
- // Augment the designated initial packages by their tests.
- // Dependencies are loaded in parallel.
- var xtestPkgs []*build.Package
- for importPath, augment := range conf.ImportPkgs {
- if !augment {
- continue
- }
-
- // No vendor check on packages imported from command line.
- bp, err := imp.findPackage(importPath, conf.Cwd, ignoreVendor)
- if err != nil {
- // Package not found, or can't even parse package declaration.
- // Already reported by previous loop; ignore it.
- continue
- }
-
- // Needs external test package?
- if len(bp.XTestGoFiles) > 0 {
- xtestPkgs = append(xtestPkgs, bp)
- }
-
- // Consult the cache using the canonical package path.
- path := bp.ImportPath
- imp.importedMu.Lock() // (unnecessary, we're sequential here)
- ii, ok := imp.imported[path]
- // Paranoid checks added due to issue #11012.
- if !ok {
- // Unreachable.
- // The previous loop called importAll and thus
- // startLoad for each path in ImportPkgs, which
- // populates imp.imported[path] with a non-zero value.
- panic(fmt.Sprintf("imported[%q] not found", path))
- }
- if ii == nil {
- // Unreachable.
- // The ii values in this loop are the same as in
- // the previous loop, which enforced the invariant
- // that at least one of ii.err and ii.info is non-nil.
- panic(fmt.Sprintf("imported[%q] == nil", path))
- }
- if ii.info == nil {
- // Unreachable.
- // awaitCompletion has the postcondition
- // ii.info != nil.
- panic(fmt.Sprintf("imported[%q].info = nil", path))
- }
- info := ii.info
- imp.importedMu.Unlock()
-
- // Parse the in-package test files.
- files, errs := imp.conf.parsePackageFiles(bp, 't')
- for _, err := range errs {
- info.appendError(err)
- }
-
- // The test files augmenting package P cannot be imported,
- // but may import packages that import P,
- // so we must disable the cycle check.
- imp.addFiles(info, files, false)
- }
-
- createPkg := func(path, dir string, files []*ast.File, errs []error) {
- info := imp.newPackageInfo(path, dir)
- for _, err := range errs {
- info.appendError(err)
- }
-
- // Ad hoc packages are non-importable,
- // so no cycle check is needed.
- // addFiles loads dependencies in parallel.
- imp.addFiles(info, files, false)
- prog.Created = append(prog.Created, info)
- }
-
- // Create packages specified by conf.CreatePkgs.
- for _, cp := range conf.CreatePkgs {
- files, errs := parseFiles(conf.fset(), conf.build(), nil, conf.Cwd, cp.Filenames, conf.ParserMode)
- files = append(files, cp.Files...)
-
- path := cp.Path
- if path == "" {
- if len(files) > 0 {
- path = files[0].Name.Name
- } else {
- path = "(unnamed)"
- }
- }
-
- dir := conf.Cwd
- if len(files) > 0 && files[0].Pos().IsValid() {
- dir = filepath.Dir(conf.fset().File(files[0].Pos()).Name())
- }
- createPkg(path, dir, files, errs)
- }
-
- // Create external test packages.
- sort.Sort(byImportPath(xtestPkgs))
- for _, bp := range xtestPkgs {
- files, errs := imp.conf.parsePackageFiles(bp, 'x')
- createPkg(bp.ImportPath+"_test", bp.Dir, files, errs)
- }
-
- // -- finishing up (sequential) ----------------------------------------
-
- if len(prog.Imported)+len(prog.Created) == 0 {
- return nil, errors.New("no initial packages were loaded")
- }
-
- // Create infos for indirectly imported packages.
- // e.g. incomplete packages without syntax, loaded from export data.
- for _, obj := range prog.importMap {
- info := prog.AllPackages[obj]
- if info == nil {
- prog.AllPackages[obj] = &PackageInfo{Pkg: obj, Importable: true}
- } else {
- // finished
- info.checker = nil
- info.errorFunc = nil
- }
- }
-
- if !conf.AllowErrors {
- // Report errors in indirectly imported packages.
- for _, info := range prog.AllPackages {
- if len(info.Errors) > 0 {
- errpkgs = append(errpkgs, info.Pkg.Path())
- }
- }
- if errpkgs != nil {
- var more string
- if len(errpkgs) > 3 {
- more = fmt.Sprintf(" and %d more", len(errpkgs)-3)
- errpkgs = errpkgs[:3]
- }
- return nil, fmt.Errorf("couldn't load packages due to errors: %s%s",
- strings.Join(errpkgs, ", "), more)
- }
- }
-
- markErrorFreePackages(prog.AllPackages)
-
- return prog, nil
-}
-
-type byImportPath []*build.Package
-
-func (b byImportPath) Len() int { return len(b) }
-func (b byImportPath) Less(i, j int) bool { return b[i].ImportPath < b[j].ImportPath }
-func (b byImportPath) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
-
-// markErrorFreePackages sets the TransitivelyErrorFree flag on all
-// applicable packages.
-func markErrorFreePackages(allPackages map[*types.Package]*PackageInfo) {
- // Build the transpose of the import graph.
- importedBy := make(map[*types.Package]map[*types.Package]bool)
- for P := range allPackages {
- for _, Q := range P.Imports() {
- clients, ok := importedBy[Q]
- if !ok {
- clients = make(map[*types.Package]bool)
- importedBy[Q] = clients
- }
- clients[P] = true
- }
- }
-
- // Find all packages reachable from some error package.
- reachable := make(map[*types.Package]bool)
- var visit func(*types.Package)
- visit = func(p *types.Package) {
- if !reachable[p] {
- reachable[p] = true
- for q := range importedBy[p] {
- visit(q)
- }
- }
- }
- for _, info := range allPackages {
- if len(info.Errors) > 0 {
- visit(info.Pkg)
- }
- }
-
- // Mark the others as "transitively error-free".
- for _, info := range allPackages {
- if !reachable[info.Pkg] {
- info.TransitivelyErrorFree = true
- }
- }
-}
-
-// build returns the effective build context.
-func (conf *Config) build() *build.Context {
- if conf.Build != nil {
- return conf.Build
- }
- return &build.Default
-}
-
-// parsePackageFiles enumerates the files belonging to package path,
-// then loads, parses and returns them, plus a list of I/O or parse
-// errors that were encountered.
-//
-// 'which' indicates which files to include:
-// 'g': include non-test *.go source files (GoFiles + processed CgoFiles)
-// 't': include in-package *_test.go source files (TestGoFiles)
-// 'x': include external *_test.go source files. (XTestGoFiles)
-//
-func (conf *Config) parsePackageFiles(bp *build.Package, which rune) ([]*ast.File, []error) {
- if bp.ImportPath == "unsafe" {
- return nil, nil
- }
- var filenames []string
- switch which {
- case 'g':
- filenames = bp.GoFiles
- case 't':
- filenames = bp.TestGoFiles
- case 'x':
- filenames = bp.XTestGoFiles
- default:
- panic(which)
- }
-
- files, errs := parseFiles(conf.fset(), conf.build(), conf.DisplayPath, bp.Dir, filenames, conf.ParserMode)
-
- // Preprocess CgoFiles and parse the outputs (sequentially).
- if which == 'g' && bp.CgoFiles != nil {
- cgofiles, err := cgo.ProcessFiles(bp, conf.fset(), conf.DisplayPath, conf.ParserMode)
- if err != nil {
- errs = append(errs, err)
- } else {
- files = append(files, cgofiles...)
- }
- }
-
- return files, errs
-}
-
-// doImport imports the package denoted by path.
-// It implements the types.Importer signature.
-//
-// It returns an error if a package could not be created
-// (e.g. go/build or parse error), but type errors are reported via
-// the types.Config.Error callback (the first of which is also saved
-// in the package's PackageInfo).
-//
-// Idempotent.
-//
-func (imp *importer) doImport(from *PackageInfo, to string) (*types.Package, error) {
- if to == "C" {
- // This should be unreachable, but ad hoc packages are
- // not currently subject to cgo preprocessing.
- // See https://golang.org/issue/11627.
- return nil, fmt.Errorf(`the loader doesn't cgo-process ad hoc packages like %q; see Go issue 11627`,
- from.Pkg.Path())
- }
-
- bp, err := imp.findPackage(to, from.dir, 0)
- if err != nil {
- return nil, err
- }
-
- // The standard unsafe package is handled specially,
- // and has no PackageInfo.
- if bp.ImportPath == "unsafe" {
- return types.Unsafe, nil
- }
-
- // Look for the package in the cache using its canonical path.
- path := bp.ImportPath
- imp.importedMu.Lock()
- ii := imp.imported[path]
- imp.importedMu.Unlock()
- if ii == nil {
- panic("internal error: unexpected import: " + path)
- }
- if ii.info != nil {
- return ii.info.Pkg, nil
- }
-
- // Import of incomplete package: this indicates a cycle.
- fromPath := from.Pkg.Path()
- if cycle := imp.findPath(path, fromPath); cycle != nil {
- // Normalize cycle: start from alphabetically largest node.
- pos, start := -1, ""
- for i, s := range cycle {
- if pos < 0 || s > start {
- pos, start = i, s
- }
- }
- cycle = append(cycle, cycle[:pos]...)[pos:] // rotate cycle to start from largest
- cycle = append(cycle, cycle[0]) // add start node to end to show cycliness
- return nil, fmt.Errorf("import cycle: %s", strings.Join(cycle, " -> "))
- }
-
- panic("internal error: import of incomplete (yet acyclic) package: " + fromPath)
-}
-
-// findPackage locates the package denoted by the importPath in the
-// specified directory.
-func (imp *importer) findPackage(importPath, fromDir string, mode build.ImportMode) (*build.Package, error) {
- // We use a non-blocking duplicate-suppressing cache (gopl.io §9.7)
- // to avoid holding the lock around FindPackage.
- key := findpkgKey{importPath, fromDir, mode}
- imp.findpkgMu.Lock()
- v, ok := imp.findpkg[key]
- if ok {
- // cache hit
- imp.findpkgMu.Unlock()
-
- <-v.ready // wait for entry to become ready
- } else {
- // Cache miss: this goroutine becomes responsible for
- // populating the map entry and broadcasting its readiness.
- v = &findpkgValue{ready: make(chan struct{})}
- imp.findpkg[key] = v
- imp.findpkgMu.Unlock()
-
- ioLimit <- true
- v.bp, v.err = imp.conf.FindPackage(imp.conf.build(), importPath, fromDir, mode)
- <-ioLimit
-
- if _, ok := v.err.(*build.NoGoError); ok {
- v.err = nil // empty directory is not an error
- }
-
- close(v.ready) // broadcast ready condition
- }
- return v.bp, v.err
-}
-
-// importAll loads, parses, and type-checks the specified packages in
-// parallel and returns their completed importInfos in unspecified order.
-//
-// fromPath is the package path of the importing package, if it is
-// importable, "" otherwise. It is used for cycle detection.
-//
-// fromDir is the directory containing the import declaration that
-// caused these imports.
-//
-func (imp *importer) importAll(fromPath, fromDir string, imports map[string]bool, mode build.ImportMode) (infos []*PackageInfo, errors []importError) {
- // TODO(adonovan): opt: do the loop in parallel once
- // findPackage is non-blocking.
- var pending []*importInfo
- for importPath := range imports {
- bp, err := imp.findPackage(importPath, fromDir, mode)
- if err != nil {
- errors = append(errors, importError{
- path: importPath,
- err: err,
- })
- continue
- }
- pending = append(pending, imp.startLoad(bp))
- }
-
- if fromPath != "" {
- // We're loading a set of imports.
- //
- // We must record graph edges from the importing package
- // to its dependencies, and check for cycles.
- imp.graphMu.Lock()
- deps, ok := imp.graph[fromPath]
- if !ok {
- deps = make(map[string]bool)
- imp.graph[fromPath] = deps
- }
- for _, ii := range pending {
- deps[ii.path] = true
- }
- imp.graphMu.Unlock()
- }
-
- for _, ii := range pending {
- if fromPath != "" {
- if cycle := imp.findPath(ii.path, fromPath); cycle != nil {
- // Cycle-forming import: we must not await its
- // completion since it would deadlock.
- //
- // We don't record the error in ii since
- // the error is really associated with the
- // cycle-forming edge, not the package itself.
- // (Also it would complicate the
- // invariants of importPath completion.)
- if trace {
- fmt.Fprintf(os.Stderr, "import cycle: %q\n", cycle)
- }
- continue
- }
- }
- ii.awaitCompletion()
- infos = append(infos, ii.info)
- }
-
- return infos, errors
-}
-
-// findPath returns an arbitrary path from 'from' to 'to' in the import
-// graph, or nil if there was none.
-func (imp *importer) findPath(from, to string) []string {
- imp.graphMu.Lock()
- defer imp.graphMu.Unlock()
-
- seen := make(map[string]bool)
- var search func(stack []string, importPath string) []string
- search = func(stack []string, importPath string) []string {
- if !seen[importPath] {
- seen[importPath] = true
- stack = append(stack, importPath)
- if importPath == to {
- return stack
- }
- for x := range imp.graph[importPath] {
- if p := search(stack, x); p != nil {
- return p
- }
- }
- }
- return nil
- }
- return search(make([]string, 0, 20), from)
-}
-
-// startLoad initiates the loading, parsing and type-checking of the
-// specified package and its dependencies, if it has not already begun.
-//
-// It returns an importInfo, not necessarily in a completed state. The
-// caller must call awaitCompletion() before accessing its info field.
-//
-// startLoad is concurrency-safe and idempotent.
-//
-func (imp *importer) startLoad(bp *build.Package) *importInfo {
- path := bp.ImportPath
- imp.importedMu.Lock()
- ii, ok := imp.imported[path]
- if !ok {
- ii = &importInfo{path: path, complete: make(chan struct{})}
- imp.imported[path] = ii
- go func() {
- info := imp.load(bp)
- ii.Complete(info)
- }()
- }
- imp.importedMu.Unlock()
-
- return ii
-}
-
-// load implements package loading by parsing Go source files
-// located by go/build.
-func (imp *importer) load(bp *build.Package) *PackageInfo {
- info := imp.newPackageInfo(bp.ImportPath, bp.Dir)
- info.Importable = true
- files, errs := imp.conf.parsePackageFiles(bp, 'g')
- for _, err := range errs {
- info.appendError(err)
- }
-
- imp.addFiles(info, files, true)
-
- imp.progMu.Lock()
- imp.prog.importMap[bp.ImportPath] = info.Pkg
- imp.progMu.Unlock()
-
- return info
-}
-
-// addFiles adds and type-checks the specified files to info, loading
-// their dependencies if needed. The order of files determines the
-// package initialization order. It may be called multiple times on the
-// same package. Errors are appended to the info.Errors field.
-//
-// cycleCheck determines whether the imports within files create
-// dependency edges that should be checked for potential cycles.
-//
-func (imp *importer) addFiles(info *PackageInfo, files []*ast.File, cycleCheck bool) {
- // Ensure the dependencies are loaded, in parallel.
- var fromPath string
- if cycleCheck {
- fromPath = info.Pkg.Path()
- }
- // TODO(adonovan): opt: make the caller do scanImports.
- // Callers with a build.Package can skip it.
- imp.importAll(fromPath, info.dir, scanImports(files), 0)
-
- if trace {
- fmt.Fprintf(os.Stderr, "%s: start %q (%d)\n",
- time.Since(imp.start), info.Pkg.Path(), len(files))
- }
-
- // Don't call checker.Files on Unsafe, even with zero files,
- // because it would mutate the package, which is a global.
- if info.Pkg == types.Unsafe {
- if len(files) > 0 {
- panic(`"unsafe" package contains unexpected files`)
- }
- } else {
- // Ignore the returned (first) error since we
- // already collect them all in the PackageInfo.
- info.checker.Files(files)
- info.Files = append(info.Files, files...)
- }
-
- if imp.conf.AfterTypeCheck != nil {
- imp.conf.AfterTypeCheck(info, files)
- }
-
- if trace {
- fmt.Fprintf(os.Stderr, "%s: stop %q\n",
- time.Since(imp.start), info.Pkg.Path())
- }
-}
-
-func (imp *importer) newPackageInfo(path, dir string) *PackageInfo {
- var pkg *types.Package
- if path == "unsafe" {
- pkg = types.Unsafe
- } else {
- pkg = types.NewPackage(path, "")
- }
- info := &PackageInfo{
- Pkg: pkg,
- Info: types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Defs: make(map[*ast.Ident]types.Object),
- Uses: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Scopes: make(map[ast.Node]*types.Scope),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- },
- errorFunc: imp.conf.TypeChecker.Error,
- dir: dir,
- }
-
- // Copy the types.Config so we can vary it across PackageInfos.
- tc := imp.conf.TypeChecker
- tc.IgnoreFuncBodies = false
- if f := imp.conf.TypeCheckFuncBodies; f != nil {
- tc.IgnoreFuncBodies = !f(path)
- }
- tc.Importer = closure{imp, info}
- tc.Error = info.appendError // appendError wraps the user's Error function
-
- info.checker = types.NewChecker(&tc, imp.conf.fset(), pkg, &info.Info)
- imp.progMu.Lock()
- imp.prog.AllPackages[pkg] = info
- imp.progMu.Unlock()
- return info
-}
-
-type closure struct {
- imp *importer
- info *PackageInfo
-}
-
-func (c closure) Import(to string) (*types.Package, error) { return c.imp.doImport(c.info, to) }
diff --git a/vendor/golang.org/x/tools/go/loader/util.go b/vendor/golang.org/x/tools/go/loader/util.go
deleted file mode 100644
index 7f38dd74077..00000000000
--- a/vendor/golang.org/x/tools/go/loader/util.go
+++ /dev/null
@@ -1,124 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package loader
-
-import (
- "go/ast"
- "go/build"
- "go/parser"
- "go/token"
- "io"
- "os"
- "strconv"
- "sync"
-
- "golang.org/x/tools/go/buildutil"
-)
-
-// We use a counting semaphore to limit
-// the number of parallel I/O calls per process.
-var ioLimit = make(chan bool, 10)
-
-// parseFiles parses the Go source files within directory dir and
-// returns the ASTs of the ones that could be at least partially parsed,
-// along with a list of I/O and parse errors encountered.
-//
-// I/O is done via ctxt, which may specify a virtual file system.
-// displayPath is used to transform the filenames attached to the ASTs.
-//
-func parseFiles(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, files []string, mode parser.Mode) ([]*ast.File, []error) {
- if displayPath == nil {
- displayPath = func(path string) string { return path }
- }
- var wg sync.WaitGroup
- n := len(files)
- parsed := make([]*ast.File, n)
- errors := make([]error, n)
- for i, file := range files {
- if !buildutil.IsAbsPath(ctxt, file) {
- file = buildutil.JoinPath(ctxt, dir, file)
- }
- wg.Add(1)
- go func(i int, file string) {
- ioLimit <- true // wait
- defer func() {
- wg.Done()
- <-ioLimit // signal
- }()
- var rd io.ReadCloser
- var err error
- if ctxt.OpenFile != nil {
- rd, err = ctxt.OpenFile(file)
- } else {
- rd, err = os.Open(file)
- }
- if err != nil {
- errors[i] = err // open failed
- return
- }
-
- // ParseFile may return both an AST and an error.
- parsed[i], errors[i] = parser.ParseFile(fset, displayPath(file), rd, mode)
- rd.Close()
- }(i, file)
- }
- wg.Wait()
-
- // Eliminate nils, preserving order.
- var o int
- for _, f := range parsed {
- if f != nil {
- parsed[o] = f
- o++
- }
- }
- parsed = parsed[:o]
-
- o = 0
- for _, err := range errors {
- if err != nil {
- errors[o] = err
- o++
- }
- }
- errors = errors[:o]
-
- return parsed, errors
-}
-
-// scanImports returns the set of all import paths from all
-// import specs in the specified files.
-func scanImports(files []*ast.File) map[string]bool {
- imports := make(map[string]bool)
- for _, f := range files {
- for _, decl := range f.Decls {
- if decl, ok := decl.(*ast.GenDecl); ok && decl.Tok == token.IMPORT {
- for _, spec := range decl.Specs {
- spec := spec.(*ast.ImportSpec)
-
- // NB: do not assume the program is well-formed!
- path, err := strconv.Unquote(spec.Path.Value)
- if err != nil {
- continue // quietly ignore the error
- }
- if path == "C" {
- continue // skip pseudopackage
- }
- imports[path] = true
- }
- }
- }
- }
- return imports
-}
-
-// ---------- Internal helpers ----------
-
-// TODO(adonovan): make this a method: func (*token.File) Contains(token.Pos)
-func tokenFileContainsPos(f *token.File, pos token.Pos) bool {
- p := int(pos)
- base := f.Base()
- return base <= p && p < base+f.Size()
-}
diff --git a/vendor/golang.org/x/tools/go/packages/doc.go b/vendor/golang.org/x/tools/go/packages/doc.go
deleted file mode 100644
index 4bfe28a51ff..00000000000
--- a/vendor/golang.org/x/tools/go/packages/doc.go
+++ /dev/null
@@ -1,221 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package packages loads Go packages for inspection and analysis.
-
-The Load function takes as input a list of patterns and return a list of Package
-structs describing individual packages matched by those patterns.
-The LoadMode controls the amount of detail in the loaded packages.
-
-Load passes most patterns directly to the underlying build tool,
-but all patterns with the prefix "query=", where query is a
-non-empty string of letters from [a-z], are reserved and may be
-interpreted as query operators.
-
-Two query operators are currently supported: "file" and "pattern".
-
-The query "file=path/to/file.go" matches the package or packages enclosing
-the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
-might return the packages "fmt" and "fmt [fmt.test]".
-
-The query "pattern=string" causes "string" to be passed directly to
-the underlying build tool. In most cases this is unnecessary,
-but an application can use Load("pattern=" + x) as an escaping mechanism
-to ensure that x is not interpreted as a query operator if it contains '='.
-
-All other query operators are reserved for future use and currently
-cause Load to report an error.
-
-The Package struct provides basic information about the package, including
-
- - ID, a unique identifier for the package in the returned set;
- - GoFiles, the names of the package's Go source files;
- - Imports, a map from source import strings to the Packages they name;
- - Types, the type information for the package's exported symbols;
- - Syntax, the parsed syntax trees for the package's source code; and
- - TypeInfo, the result of a complete type-check of the package syntax trees.
-
-(See the documentation for type Package for the complete list of fields
-and more detailed descriptions.)
-
-For example,
-
- Load(nil, "bytes", "unicode...")
-
-returns four Package structs describing the standard library packages
-bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
-can match multiple packages and that a package might be matched by
-multiple patterns: in general it is not possible to determine which
-packages correspond to which patterns.
-
-Note that the list returned by Load contains only the packages matched
-by the patterns. Their dependencies can be found by walking the import
-graph using the Imports fields.
-
-The Load function can be configured by passing a pointer to a Config as
-the first argument. A nil Config is equivalent to the zero Config, which
-causes Load to run in LoadFiles mode, collecting minimal information.
-See the documentation for type Config for details.
-
-As noted earlier, the Config.Mode controls the amount of detail
-reported about the loaded packages. See the documentation for type LoadMode
-for details.
-
-Most tools should pass their command-line arguments (after any flags)
-uninterpreted to the loader, so that the loader can interpret them
-according to the conventions of the underlying build system.
-See the Example function for typical usage.
-
-*/
-package packages // import "golang.org/x/tools/go/packages"
-
-/*
-
-Motivation and design considerations
-
-The new package's design solves problems addressed by two existing
-packages: go/build, which locates and describes packages, and
-golang.org/x/tools/go/loader, which loads, parses and type-checks them.
-The go/build.Package structure encodes too much of the 'go build' way
-of organizing projects, leaving us in need of a data type that describes a
-package of Go source code independent of the underlying build system.
-We wanted something that works equally well with go build and vgo, and
-also other build systems such as Bazel and Blaze, making it possible to
-construct analysis tools that work in all these environments.
-Tools such as errcheck and staticcheck were essentially unavailable to
-the Go community at Google, and some of Google's internal tools for Go
-are unavailable externally.
-This new package provides a uniform way to obtain package metadata by
-querying each of these build systems, optionally supporting their
-preferred command-line notations for packages, so that tools integrate
-neatly with users' build environments. The Metadata query function
-executes an external query tool appropriate to the current workspace.
-
-Loading packages always returns the complete import graph "all the way down",
-even if all you want is information about a single package, because the query
-mechanisms of all the build systems we currently support ({go,vgo} list, and
-blaze/bazel aspect-based query) cannot provide detailed information
-about one package without visiting all its dependencies too, so there is
-no additional asymptotic cost to providing transitive information.
-(This property might not be true of a hypothetical 5th build system.)
-
-In calls to TypeCheck, all initial packages, and any package that
-transitively depends on one of them, must be loaded from source.
-Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
-source; D may be loaded from export data, and E may not be loaded at all
-(though it's possible that D's export data mentions it, so a
-types.Package may be created for it and exposed.)
-
-The old loader had a feature to suppress type-checking of function
-bodies on a per-package basis, primarily intended to reduce the work of
-obtaining type information for imported packages. Now that imports are
-satisfied by export data, the optimization no longer seems necessary.
-
-Despite some early attempts, the old loader did not exploit export data,
-instead always using the equivalent of WholeProgram mode. This was due
-to the complexity of mixing source and export data packages (now
-resolved by the upward traversal mentioned above), and because export data
-files were nearly always missing or stale. Now that 'go build' supports
-caching, all the underlying build systems can guarantee to produce
-export data in a reasonable (amortized) time.
-
-Test "main" packages synthesized by the build system are now reported as
-first-class packages, avoiding the need for clients (such as go/ssa) to
-reinvent this generation logic.
-
-One way in which go/packages is simpler than the old loader is in its
-treatment of in-package tests. In-package tests are packages that
-consist of all the files of the library under test, plus the test files.
-The old loader constructed in-package tests by a two-phase process of
-mutation called "augmentation": first it would construct and type check
-all the ordinary library packages and type-check the packages that
-depend on them; then it would add more (test) files to the package and
-type-check again. This two-phase approach had four major problems:
-1) in processing the tests, the loader modified the library package,
- leaving no way for a client application to see both the test
- package and the library package; one would mutate into the other.
-2) because test files can declare additional methods on types defined in
- the library portion of the package, the dispatch of method calls in
- the library portion was affected by the presence of the test files.
- This should have been a clue that the packages were logically
- different.
-3) this model of "augmentation" assumed at most one in-package test
- per library package, which is true of projects using 'go build',
- but not other build systems.
-4) because of the two-phase nature of test processing, all packages that
- import the library package had to be processed before augmentation,
- forcing a "one-shot" API and preventing the client from calling Load
- in several times in sequence as is now possible in WholeProgram mode.
- (TypeCheck mode has a similar one-shot restriction for a different reason.)
-
-Early drafts of this package supported "multi-shot" operation.
-Although it allowed clients to make a sequence of calls (or concurrent
-calls) to Load, building up the graph of Packages incrementally,
-it was of marginal value: it complicated the API
-(since it allowed some options to vary across calls but not others),
-it complicated the implementation,
-it cannot be made to work in Types mode, as explained above,
-and it was less efficient than making one combined call (when this is possible).
-Among the clients we have inspected, none made multiple calls to load
-but could not be easily and satisfactorily modified to make only a single call.
-However, applications changes may be required.
-For example, the ssadump command loads the user-specified packages
-and in addition the runtime package. It is tempting to simply append
-"runtime" to the user-provided list, but that does not work if the user
-specified an ad-hoc package such as [a.go b.go].
-Instead, ssadump no longer requests the runtime package,
-but seeks it among the dependencies of the user-specified packages,
-and emits an error if it is not found.
-
-Overlays: The Overlay field in the Config allows providing alternate contents
-for Go source files, by providing a mapping from file path to contents.
-go/packages will pull in new imports added in overlay files when go/packages
-is run in LoadImports mode or greater.
-Overlay support for the go list driver isn't complete yet: if the file doesn't
-exist on disk, it will only be recognized in an overlay if it is a non-test file
-and the package would be reported even without the overlay.
-
-Questions & Tasks
-
-- Add GOARCH/GOOS?
- They are not portable concepts, but could be made portable.
- Our goal has been to allow users to express themselves using the conventions
- of the underlying build system: if the build system honors GOARCH
- during a build and during a metadata query, then so should
- applications built atop that query mechanism.
- Conversely, if the target architecture of the build is determined by
- command-line flags, the application can pass the relevant
- flags through to the build system using a command such as:
- myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
- However, this approach is low-level, unwieldy, and non-portable.
- GOOS and GOARCH seem important enough to warrant a dedicated option.
-
-- How should we handle partial failures such as a mixture of good and
- malformed patterns, existing and non-existent packages, successful and
- failed builds, import failures, import cycles, and so on, in a call to
- Load?
-
-- Support bazel, blaze, and go1.10 list, not just go1.11 list.
-
-- Handle (and test) various partial success cases, e.g.
- a mixture of good packages and:
- invalid patterns
- nonexistent packages
- empty packages
- packages with malformed package or import declarations
- unreadable files
- import cycles
- other parse errors
- type errors
- Make sure we record errors at the correct place in the graph.
-
-- Missing packages among initial arguments are not reported.
- Return bogus packages for them, like golist does.
-
-- "undeclared name" errors (for example) are reported out of source file
- order. I suspect this is due to the breadth-first resolution now used
- by go/types. Is that a bug? Discuss with gri.
-
-*/
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
deleted file mode 100644
index 8c8473fd0bd..00000000000
--- a/vendor/golang.org/x/tools/go/packages/external.go
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file enables an external tool to intercept package requests.
-// If the tool is present then its results are used in preference to
-// the go list command.
-
-package packages
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "os"
- "os/exec"
- "strings"
-)
-
-// The Driver Protocol
-//
-// The driver, given the inputs to a call to Load, returns metadata about the packages specified.
-// This allows for different build systems to support go/packages by telling go/packages how the
-// packages' source is organized.
-// The driver is a binary, either specified by the GOPACKAGESDRIVER environment variable or in
-// the path as gopackagesdriver. It's given the inputs to load in its argv. See the package
-// documentation in doc.go for the full description of the patterns that need to be supported.
-// A driver receives as a JSON-serialized driverRequest struct in standard input and will
-// produce a JSON-serialized driverResponse (see definition in packages.go) in its standard output.
-
-// driverRequest is used to provide the portion of Load's Config that is needed by a driver.
-type driverRequest struct {
- Mode LoadMode `json:"mode"`
- // Env specifies the environment the underlying build system should be run in.
- Env []string `json:"env"`
- // BuildFlags are flags that should be passed to the underlying build system.
- BuildFlags []string `json:"build_flags"`
- // Tests specifies whether the patterns should also return test packages.
- Tests bool `json:"tests"`
- // Overlay maps file paths (relative to the driver's working directory) to the byte contents
- // of overlay files.
- Overlay map[string][]byte `json:"overlay"`
-}
-
-// findExternalDriver returns the file path of a tool that supplies
-// the build system package structure, or "" if not found."
-// If GOPACKAGESDRIVER is set in the environment findExternalTool returns its
-// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
-func findExternalDriver(cfg *Config) driver {
- const toolPrefix = "GOPACKAGESDRIVER="
- tool := ""
- for _, env := range cfg.Env {
- if val := strings.TrimPrefix(env, toolPrefix); val != env {
- tool = val
- }
- }
- if tool != "" && tool == "off" {
- return nil
- }
- if tool == "" {
- var err error
- tool, err = exec.LookPath("gopackagesdriver")
- if err != nil {
- return nil
- }
- }
- return func(cfg *Config, words ...string) (*driverResponse, error) {
- req, err := json.Marshal(driverRequest{
- Mode: cfg.Mode,
- Env: cfg.Env,
- BuildFlags: cfg.BuildFlags,
- Tests: cfg.Tests,
- Overlay: cfg.Overlay,
- })
- if err != nil {
- return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
- }
-
- buf := new(bytes.Buffer)
- stderr := new(bytes.Buffer)
- cmd := exec.CommandContext(cfg.Context, tool, words...)
- cmd.Dir = cfg.Dir
- cmd.Env = cfg.Env
- cmd.Stdin = bytes.NewReader(req)
- cmd.Stdout = buf
- cmd.Stderr = stderr
-
- if err := cmd.Run(); err != nil {
- return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
- }
- if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
- fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, words...), stderr)
- }
-
- var response driverResponse
- if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
- return nil, err
- }
- return &response, nil
- }
-}
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
deleted file mode 100644
index 220d409878e..00000000000
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ /dev/null
@@ -1,940 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packages
-
-import (
- "bytes"
- "context"
- "encoding/json"
- "fmt"
- "go/types"
- "log"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "reflect"
- "sort"
- "strconv"
- "strings"
- "sync"
- "unicode"
-
- "golang.org/x/tools/go/internal/packagesdriver"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/xerrors"
-)
-
-// debug controls verbose logging.
-var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG"))
-
-// A goTooOldError reports that the go command
-// found by exec.LookPath is too old to use the new go list behavior.
-type goTooOldError struct {
- error
-}
-
-// responseDeduper wraps a driverResponse, deduplicating its contents.
-type responseDeduper struct {
- seenRoots map[string]bool
- seenPackages map[string]*Package
- dr *driverResponse
-}
-
-func newDeduper() *responseDeduper {
- return &responseDeduper{
- dr: &driverResponse{},
- seenRoots: map[string]bool{},
- seenPackages: map[string]*Package{},
- }
-}
-
-// addAll fills in r with a driverResponse.
-func (r *responseDeduper) addAll(dr *driverResponse) {
- for _, pkg := range dr.Packages {
- r.addPackage(pkg)
- }
- for _, root := range dr.Roots {
- r.addRoot(root)
- }
-}
-
-func (r *responseDeduper) addPackage(p *Package) {
- if r.seenPackages[p.ID] != nil {
- return
- }
- r.seenPackages[p.ID] = p
- r.dr.Packages = append(r.dr.Packages, p)
-}
-
-func (r *responseDeduper) addRoot(id string) {
- if r.seenRoots[id] {
- return
- }
- r.seenRoots[id] = true
- r.dr.Roots = append(r.dr.Roots, id)
-}
-
-type golistState struct {
- cfg *Config
- ctx context.Context
-
- envOnce sync.Once
- goEnvError error
- goEnv map[string]string
-
- rootsOnce sync.Once
- rootDirsError error
- rootDirs map[string]string
-
- // vendorDirs caches the (non)existence of vendor directories.
- vendorDirs map[string]bool
-}
-
-// getEnv returns Go environment variables. Only specific variables are
-// populated -- computing all of them is slow.
-func (state *golistState) getEnv() (map[string]string, error) {
- state.envOnce.Do(func() {
- var b *bytes.Buffer
- b, state.goEnvError = state.invokeGo("env", "-json", "GOMOD", "GOPATH")
- if state.goEnvError != nil {
- return
- }
-
- state.goEnv = make(map[string]string)
- decoder := json.NewDecoder(b)
- if state.goEnvError = decoder.Decode(&state.goEnv); state.goEnvError != nil {
- return
- }
- })
- return state.goEnv, state.goEnvError
-}
-
-// mustGetEnv is a convenience function that can be used if getEnv has already succeeded.
-func (state *golistState) mustGetEnv() map[string]string {
- env, err := state.getEnv()
- if err != nil {
- panic(fmt.Sprintf("mustGetEnv: %v", err))
- }
- return env
-}
-
-// goListDriver uses the go list command to interpret the patterns and produce
-// the build system package structure.
-// See driver for more details.
-func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
- // Make sure that any asynchronous go commands are killed when we return.
- parentCtx := cfg.Context
- if parentCtx == nil {
- parentCtx = context.Background()
- }
- ctx, cancel := context.WithCancel(parentCtx)
- defer cancel()
-
- response := newDeduper()
-
- // Fill in response.Sizes asynchronously if necessary.
- var sizeserr error
- var sizeswg sync.WaitGroup
- if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 {
- sizeswg.Add(1)
- go func() {
- var sizes types.Sizes
- sizes, sizeserr = packagesdriver.GetSizesGolist(ctx, cfg.BuildFlags, cfg.Env, cfg.gocmdRunner, cfg.Dir)
- // types.SizesFor always returns nil or a *types.StdSizes.
- response.dr.Sizes, _ = sizes.(*types.StdSizes)
- sizeswg.Done()
- }()
- }
-
- state := &golistState{
- cfg: cfg,
- ctx: ctx,
- vendorDirs: map[string]bool{},
- }
-
- // Determine files requested in contains patterns
- var containFiles []string
- restPatterns := make([]string, 0, len(patterns))
- // Extract file= and other [querytype]= patterns. Report an error if querytype
- // doesn't exist.
-extractQueries:
- for _, pattern := range patterns {
- eqidx := strings.Index(pattern, "=")
- if eqidx < 0 {
- restPatterns = append(restPatterns, pattern)
- } else {
- query, value := pattern[:eqidx], pattern[eqidx+len("="):]
- switch query {
- case "file":
- containFiles = append(containFiles, value)
- case "pattern":
- restPatterns = append(restPatterns, value)
- case "": // not a reserved query
- restPatterns = append(restPatterns, pattern)
- default:
- for _, rune := range query {
- if rune < 'a' || rune > 'z' { // not a reserved query
- restPatterns = append(restPatterns, pattern)
- continue extractQueries
- }
- }
- // Reject all other patterns containing "="
- return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern)
- }
- }
- }
-
- // See if we have any patterns to pass through to go list. Zero initial
- // patterns also requires a go list call, since it's the equivalent of
- // ".".
- if len(restPatterns) > 0 || len(patterns) == 0 {
- dr, err := state.createDriverResponse(restPatterns...)
- if err != nil {
- return nil, err
- }
- response.addAll(dr)
- }
-
- if len(containFiles) != 0 {
- if err := state.runContainsQueries(response, containFiles); err != nil {
- return nil, err
- }
- }
-
- modifiedPkgs, needPkgs, err := state.processGolistOverlay(response)
- if err != nil {
- return nil, err
- }
-
- var containsCandidates []string
- if len(containFiles) > 0 {
- containsCandidates = append(containsCandidates, modifiedPkgs...)
- containsCandidates = append(containsCandidates, needPkgs...)
- }
- if err := state.addNeededOverlayPackages(response, needPkgs); err != nil {
- return nil, err
- }
- // Check candidate packages for containFiles.
- if len(containFiles) > 0 {
- for _, id := range containsCandidates {
- pkg, ok := response.seenPackages[id]
- if !ok {
- response.addPackage(&Package{
- ID: id,
- Errors: []Error{
- {
- Kind: ListError,
- Msg: fmt.Sprintf("package %s expected but not seen", id),
- },
- },
- })
- continue
- }
- for _, f := range containFiles {
- for _, g := range pkg.GoFiles {
- if sameFile(f, g) {
- response.addRoot(id)
- }
- }
- }
- }
- }
-
- sizeswg.Wait()
- if sizeserr != nil {
- return nil, sizeserr
- }
- return response.dr, nil
-}
-
-func (state *golistState) addNeededOverlayPackages(response *responseDeduper, pkgs []string) error {
- if len(pkgs) == 0 {
- return nil
- }
- dr, err := state.createDriverResponse(pkgs...)
- if err != nil {
- return err
- }
- for _, pkg := range dr.Packages {
- response.addPackage(pkg)
- }
- _, needPkgs, err := state.processGolistOverlay(response)
- if err != nil {
- return err
- }
- return state.addNeededOverlayPackages(response, needPkgs)
-}
-
-func (state *golistState) runContainsQueries(response *responseDeduper, queries []string) error {
- for _, query := range queries {
- // TODO(matloob): Do only one query per directory.
- fdir := filepath.Dir(query)
- // Pass absolute path of directory to go list so that it knows to treat it as a directory,
- // not a package path.
- pattern, err := filepath.Abs(fdir)
- if err != nil {
- return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err)
- }
- dirResponse, err := state.createDriverResponse(pattern)
-
- // If there was an error loading the package, or the package is returned
- // with errors, try to load the file as an ad-hoc package.
- // Usually the error will appear in a returned package, but may not if we're
- // in module mode and the ad-hoc is located outside a module.
- if err != nil || len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].GoFiles) == 0 &&
- len(dirResponse.Packages[0].Errors) == 1 {
- var queryErr error
- if dirResponse, queryErr = state.adhocPackage(pattern, query); queryErr != nil {
- return err // return the original error
- }
- }
- isRoot := make(map[string]bool, len(dirResponse.Roots))
- for _, root := range dirResponse.Roots {
- isRoot[root] = true
- }
- for _, pkg := range dirResponse.Packages {
- // Add any new packages to the main set
- // We don't bother to filter packages that will be dropped by the changes of roots,
- // that will happen anyway during graph construction outside this function.
- // Over-reporting packages is not a problem.
- response.addPackage(pkg)
- // if the package was not a root one, it cannot have the file
- if !isRoot[pkg.ID] {
- continue
- }
- for _, pkgFile := range pkg.GoFiles {
- if filepath.Base(query) == filepath.Base(pkgFile) {
- response.addRoot(pkg.ID)
- break
- }
- }
- }
- }
- return nil
-}
-
-// adhocPackage attempts to load or construct an ad-hoc package for a given
-// query, if the original call to the driver produced inadequate results.
-func (state *golistState) adhocPackage(pattern, query string) (*driverResponse, error) {
- response, err := state.createDriverResponse(query)
- if err != nil {
- return nil, err
- }
- // If we get nothing back from `go list`,
- // try to make this file into its own ad-hoc package.
- // TODO(rstambler): Should this check against the original response?
- if len(response.Packages) == 0 {
- response.Packages = append(response.Packages, &Package{
- ID: "command-line-arguments",
- PkgPath: query,
- GoFiles: []string{query},
- CompiledGoFiles: []string{query},
- Imports: make(map[string]*Package),
- })
- response.Roots = append(response.Roots, "command-line-arguments")
- }
- // Handle special cases.
- if len(response.Packages) == 1 {
- // golang/go#33482: If this is a file= query for ad-hoc packages where
- // the file only exists on an overlay, and exists outside of a module,
- // add the file to the package and remove the errors.
- if response.Packages[0].ID == "command-line-arguments" ||
- filepath.ToSlash(response.Packages[0].PkgPath) == filepath.ToSlash(query) {
- if len(response.Packages[0].GoFiles) == 0 {
- filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath
- // TODO(matloob): check if the file is outside of a root dir?
- for path := range state.cfg.Overlay {
- if path == filename {
- response.Packages[0].Errors = nil
- response.Packages[0].GoFiles = []string{path}
- response.Packages[0].CompiledGoFiles = []string{path}
- }
- }
- }
- }
- }
- return response, nil
-}
-
-// Fields must match go list;
-// see $GOROOT/src/cmd/go/internal/load/pkg.go.
-type jsonPackage struct {
- ImportPath string
- Dir string
- Name string
- Export string
- GoFiles []string
- CompiledGoFiles []string
- CFiles []string
- CgoFiles []string
- CXXFiles []string
- MFiles []string
- HFiles []string
- FFiles []string
- SFiles []string
- SwigFiles []string
- SwigCXXFiles []string
- SysoFiles []string
- Imports []string
- ImportMap map[string]string
- Deps []string
- Module *Module
- TestGoFiles []string
- TestImports []string
- XTestGoFiles []string
- XTestImports []string
- ForTest string // q in a "p [q.test]" package, else ""
- DepOnly bool
-
- Error *jsonPackageError
-}
-
-type jsonPackageError struct {
- ImportStack []string
- Pos string
- Err string
-}
-
-func otherFiles(p *jsonPackage) [][]string {
- return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles}
-}
-
-// createDriverResponse uses the "go list" command to expand the pattern
-// words and return a response for the specified packages.
-func (state *golistState) createDriverResponse(words ...string) (*driverResponse, error) {
- // go list uses the following identifiers in ImportPath and Imports:
- //
- // "p" -- importable package or main (command)
- // "q.test" -- q's test executable
- // "p [q.test]" -- variant of p as built for q's test executable
- // "q_test [q.test]" -- q's external test package
- //
- // The packages p that are built differently for a test q.test
- // are q itself, plus any helpers used by the external test q_test,
- // typically including "testing" and all its dependencies.
-
- // Run "go list" for complete
- // information on the specified packages.
- buf, err := state.invokeGo("list", golistargs(state.cfg, words)...)
- if err != nil {
- return nil, err
- }
- seen := make(map[string]*jsonPackage)
- pkgs := make(map[string]*Package)
- additionalErrors := make(map[string][]Error)
- // Decode the JSON and convert it to Package form.
- var response driverResponse
- for dec := json.NewDecoder(buf); dec.More(); {
- p := new(jsonPackage)
- if err := dec.Decode(p); err != nil {
- return nil, fmt.Errorf("JSON decoding failed: %v", err)
- }
-
- if p.ImportPath == "" {
- // The documentation for go list says that “[e]rroneous packages will have
- // a non-empty ImportPath”. If for some reason it comes back empty, we
- // prefer to error out rather than silently discarding data or handing
- // back a package without any way to refer to it.
- if p.Error != nil {
- return nil, Error{
- Pos: p.Error.Pos,
- Msg: p.Error.Err,
- }
- }
- return nil, fmt.Errorf("package missing import path: %+v", p)
- }
-
- // Work around https://golang.org/issue/33157:
- // go list -e, when given an absolute path, will find the package contained at
- // that directory. But when no package exists there, it will return a fake package
- // with an error and the ImportPath set to the absolute path provided to go list.
- // Try to convert that absolute path to what its package path would be if it's
- // contained in a known module or GOPATH entry. This will allow the package to be
- // properly "reclaimed" when overlays are processed.
- if filepath.IsAbs(p.ImportPath) && p.Error != nil {
- pkgPath, ok, err := state.getPkgPath(p.ImportPath)
- if err != nil {
- return nil, err
- }
- if ok {
- p.ImportPath = pkgPath
- }
- }
-
- if old, found := seen[p.ImportPath]; found {
- // If one version of the package has an error, and the other doesn't, assume
- // that this is a case where go list is reporting a fake dependency variant
- // of the imported package: When a package tries to invalidly import another
- // package, go list emits a variant of the imported package (with the same
- // import path, but with an error on it, and the package will have a
- // DepError set on it). An example of when this can happen is for imports of
- // main packages: main packages can not be imported, but they may be
- // separately matched and listed by another pattern.
- // See golang.org/issue/36188 for more details.
-
- // The plan is that eventually, hopefully in Go 1.15, the error will be
- // reported on the importing package rather than the duplicate "fake"
- // version of the imported package. Once all supported versions of Go
- // have the new behavior this logic can be deleted.
- // TODO(matloob): delete the workaround logic once all supported versions of
- // Go return the errors on the proper package.
-
- // There should be exactly one version of a package that doesn't have an
- // error.
- if old.Error == nil && p.Error == nil {
- if !reflect.DeepEqual(p, old) {
- return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath)
- }
- continue
- }
-
- // Determine if this package's error needs to be bubbled up.
- // This is a hack, and we expect for go list to eventually set the error
- // on the package.
- if old.Error != nil {
- var errkind string
- if strings.Contains(old.Error.Err, "not an importable package") {
- errkind = "not an importable package"
- } else if strings.Contains(old.Error.Err, "use of internal package") && strings.Contains(old.Error.Err, "not allowed") {
- errkind = "use of internal package not allowed"
- }
- if errkind != "" {
- if len(old.Error.ImportStack) < 1 {
- return nil, fmt.Errorf(`internal error: go list gave a %q error with empty import stack`, errkind)
- }
- importingPkg := old.Error.ImportStack[len(old.Error.ImportStack)-1]
- if importingPkg == old.ImportPath {
- // Using an older version of Go which put this package itself on top of import
- // stack, instead of the importer. Look for importer in second from top
- // position.
- if len(old.Error.ImportStack) < 2 {
- return nil, fmt.Errorf(`internal error: go list gave a %q error with an import stack without importing package`, errkind)
- }
- importingPkg = old.Error.ImportStack[len(old.Error.ImportStack)-2]
- }
- additionalErrors[importingPkg] = append(additionalErrors[importingPkg], Error{
- Pos: old.Error.Pos,
- Msg: old.Error.Err,
- Kind: ListError,
- })
- }
- }
-
- // Make sure that if there's a version of the package without an error,
- // that's the one reported to the user.
- if old.Error == nil {
- continue
- }
-
- // This package will replace the old one at the end of the loop.
- }
- seen[p.ImportPath] = p
-
- pkg := &Package{
- Name: p.Name,
- ID: p.ImportPath,
- GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
- CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
- OtherFiles: absJoin(p.Dir, otherFiles(p)...),
- forTest: p.ForTest,
- Module: p.Module,
- }
-
- if (state.cfg.Mode&typecheckCgo) != 0 && len(p.CgoFiles) != 0 {
- if len(p.CompiledGoFiles) > len(p.GoFiles) {
- // We need the cgo definitions, which are in the first
- // CompiledGoFile after the non-cgo ones. This is a hack but there
- // isn't currently a better way to find it. We also need the pure
- // Go files and unprocessed cgo files, all of which are already
- // in pkg.GoFiles.
- cgoTypes := p.CompiledGoFiles[len(p.GoFiles)]
- pkg.CompiledGoFiles = append([]string{cgoTypes}, pkg.GoFiles...)
- } else {
- // golang/go#38990: go list silently fails to do cgo processing
- pkg.CompiledGoFiles = nil
- pkg.Errors = append(pkg.Errors, Error{
- Msg: "go list failed to return CompiledGoFiles; https://golang.org/issue/38990?",
- Kind: ListError,
- })
- }
- }
-
- // Work around https://golang.org/issue/28749:
- // cmd/go puts assembly, C, and C++ files in CompiledGoFiles.
- // Filter out any elements of CompiledGoFiles that are also in OtherFiles.
- // We have to keep this workaround in place until go1.12 is a distant memory.
- if len(pkg.OtherFiles) > 0 {
- other := make(map[string]bool, len(pkg.OtherFiles))
- for _, f := range pkg.OtherFiles {
- other[f] = true
- }
-
- out := pkg.CompiledGoFiles[:0]
- for _, f := range pkg.CompiledGoFiles {
- if other[f] {
- continue
- }
- out = append(out, f)
- }
- pkg.CompiledGoFiles = out
- }
-
- // Extract the PkgPath from the package's ID.
- if i := strings.IndexByte(pkg.ID, ' '); i >= 0 {
- pkg.PkgPath = pkg.ID[:i]
- } else {
- pkg.PkgPath = pkg.ID
- }
-
- if pkg.PkgPath == "unsafe" {
- pkg.GoFiles = nil // ignore fake unsafe.go file
- }
-
- // Assume go list emits only absolute paths for Dir.
- if p.Dir != "" && !filepath.IsAbs(p.Dir) {
- log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir)
- }
-
- if p.Export != "" && !filepath.IsAbs(p.Export) {
- pkg.ExportFile = filepath.Join(p.Dir, p.Export)
- } else {
- pkg.ExportFile = p.Export
- }
-
- // imports
- //
- // Imports contains the IDs of all imported packages.
- // ImportsMap records (path, ID) only where they differ.
- ids := make(map[string]bool)
- for _, id := range p.Imports {
- ids[id] = true
- }
- pkg.Imports = make(map[string]*Package)
- for path, id := range p.ImportMap {
- pkg.Imports[path] = &Package{ID: id} // non-identity import
- delete(ids, id)
- }
- for id := range ids {
- if id == "C" {
- continue
- }
-
- pkg.Imports[id] = &Package{ID: id} // identity import
- }
- if !p.DepOnly {
- response.Roots = append(response.Roots, pkg.ID)
- }
-
- // Work around for pre-go.1.11 versions of go list.
- // TODO(matloob): they should be handled by the fallback.
- // Can we delete this?
- if len(pkg.CompiledGoFiles) == 0 {
- pkg.CompiledGoFiles = pkg.GoFiles
- }
-
- // Temporary work-around for golang/go#39986. Parse filenames out of
- // error messages. This happens if there are unrecoverable syntax
- // errors in the source, so we can't match on a specific error message.
- if err := p.Error; err != nil && len(err.ImportStack) == 0 && len(pkg.CompiledGoFiles) == 0 {
- addFilenameFromPos := func(pos string) bool {
- split := strings.Split(pos, ":")
- if len(split) < 1 {
- return false
- }
- filename := strings.TrimSpace(split[0])
- if filename == "" {
- return false
- }
- if !filepath.IsAbs(filename) {
- filename = filepath.Join(state.cfg.Dir, filename)
- }
- info, _ := os.Stat(filename)
- if info == nil {
- return false
- }
- pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, filename)
- pkg.GoFiles = append(pkg.GoFiles, filename)
- return true
- }
- found := addFilenameFromPos(err.Pos)
- // In some cases, go list only reports the error position in the
- // error text, not the error position. One such case is when the
- // file's package name is a keyword (see golang.org/issue/39763).
- if !found {
- addFilenameFromPos(err.Err)
- }
- }
-
- if p.Error != nil {
- msg := strings.TrimSpace(p.Error.Err) // Trim to work around golang.org/issue/32363.
- // Address golang.org/issue/35964 by appending import stack to error message.
- if msg == "import cycle not allowed" && len(p.Error.ImportStack) != 0 {
- msg += fmt.Sprintf(": import stack: %v", p.Error.ImportStack)
- }
- pkg.Errors = append(pkg.Errors, Error{
- Pos: p.Error.Pos,
- Msg: msg,
- Kind: ListError,
- })
- }
-
- pkgs[pkg.ID] = pkg
- }
-
- for id, errs := range additionalErrors {
- if p, ok := pkgs[id]; ok {
- p.Errors = append(p.Errors, errs...)
- }
- }
- for _, pkg := range pkgs {
- response.Packages = append(response.Packages, pkg)
- }
- sort.Slice(response.Packages, func(i, j int) bool { return response.Packages[i].ID < response.Packages[j].ID })
-
- return &response, nil
-}
-
-// getPkgPath finds the package path of a directory if it's relative to a root directory.
-func (state *golistState) getPkgPath(dir string) (string, bool, error) {
- absDir, err := filepath.Abs(dir)
- if err != nil {
- return "", false, err
- }
- roots, err := state.determineRootDirs()
- if err != nil {
- return "", false, err
- }
-
- for rdir, rpath := range roots {
- // Make sure that the directory is in the module,
- // to avoid creating a path relative to another module.
- if !strings.HasPrefix(absDir, rdir) {
- continue
- }
- // TODO(matloob): This doesn't properly handle symlinks.
- r, err := filepath.Rel(rdir, dir)
- if err != nil {
- continue
- }
- if rpath != "" {
- // We choose only one root even though the directory even it can belong in multiple modules
- // or GOPATH entries. This is okay because we only need to work with absolute dirs when a
- // file is missing from disk, for instance when gopls calls go/packages in an overlay.
- // Once the file is saved, gopls, or the next invocation of the tool will get the correct
- // result straight from golist.
- // TODO(matloob): Implement module tiebreaking?
- return path.Join(rpath, filepath.ToSlash(r)), true, nil
- }
- return filepath.ToSlash(r), true, nil
- }
- return "", false, nil
-}
-
-// absJoin absolutizes and flattens the lists of files.
-func absJoin(dir string, fileses ...[]string) (res []string) {
- for _, files := range fileses {
- for _, file := range files {
- if !filepath.IsAbs(file) {
- file = filepath.Join(dir, file)
- }
- res = append(res, file)
- }
- }
- return res
-}
-
-func golistargs(cfg *Config, words []string) []string {
- const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo
- fullargs := []string{
- "-e", "-json",
- fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypes|NeedTypesInfo|NeedTypesSizes) != 0),
- fmt.Sprintf("-test=%t", cfg.Tests),
- fmt.Sprintf("-export=%t", usesExportData(cfg)),
- fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0),
- // go list doesn't let you pass -test and -find together,
- // probably because you'd just get the TestMain.
- fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0),
- }
- fullargs = append(fullargs, cfg.BuildFlags...)
- fullargs = append(fullargs, "--")
- fullargs = append(fullargs, words...)
- return fullargs
-}
-
-// invokeGo returns the stdout of a go command invocation.
-func (state *golistState) invokeGo(verb string, args ...string) (*bytes.Buffer, error) {
- cfg := state.cfg
-
- inv := gocommand.Invocation{
- Verb: verb,
- Args: args,
- BuildFlags: cfg.BuildFlags,
- Env: cfg.Env,
- Logf: cfg.Logf,
- WorkingDir: cfg.Dir,
- }
- gocmdRunner := cfg.gocmdRunner
- if gocmdRunner == nil {
- gocmdRunner = &gocommand.Runner{}
- }
- stdout, stderr, _, err := gocmdRunner.RunRaw(cfg.Context, inv)
- if err != nil {
- // Check for 'go' executable not being found.
- if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound {
- return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound)
- }
-
- exitErr, ok := err.(*exec.ExitError)
- if !ok {
- // Catastrophic error:
- // - context cancellation
- return nil, xerrors.Errorf("couldn't run 'go': %w", err)
- }
-
- // Old go version?
- if strings.Contains(stderr.String(), "flag provided but not defined") {
- return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)}
- }
-
- // Related to #24854
- if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") {
- return nil, fmt.Errorf("%s", stderr.String())
- }
-
- // Is there an error running the C compiler in cgo? This will be reported in the "Error" field
- // and should be suppressed by go list -e.
- //
- // This condition is not perfect yet because the error message can include other error messages than runtime/cgo.
- isPkgPathRune := func(r rune) bool {
- // From https://golang.org/ref/spec#Import_declarations:
- // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings
- // using only characters belonging to Unicode's L, M, N, P, and S general categories
- // (the Graphic characters without spaces) and may also exclude the
- // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD.
- return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) &&
- !strings.ContainsRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r)
- }
- if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") {
- msg := stderr.String()[len("# "):]
- if strings.HasPrefix(strings.TrimLeftFunc(msg, isPkgPathRune), "\n") {
- return stdout, nil
- }
- // Treat pkg-config errors as a special case (golang.org/issue/36770).
- if strings.HasPrefix(msg, "pkg-config") {
- return stdout, nil
- }
- }
-
- // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show
- // the error in the Err section of stdout in case -e option is provided.
- // This fix is provided for backwards compatibility.
- if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") {
- output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
- strings.Trim(stderr.String(), "\n"))
- return bytes.NewBufferString(output), nil
- }
-
- // Similar to the previous error, but currently lacks a fix in Go.
- if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") {
- output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
- strings.Trim(stderr.String(), "\n"))
- return bytes.NewBufferString(output), nil
- }
-
- // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath.
- // If the package doesn't exist, put the absolute path of the directory into the error message,
- // as Go 1.13 list does.
- const noSuchDirectory = "no such directory"
- if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) {
- errstr := stderr.String()
- abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):])
- output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
- abspath, strings.Trim(stderr.String(), "\n"))
- return bytes.NewBufferString(output), nil
- }
-
- // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist.
- // Note that the error message we look for in this case is different that the one looked for above.
- if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") {
- output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
- strings.Trim(stderr.String(), "\n"))
- return bytes.NewBufferString(output), nil
- }
-
- // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a
- // directory outside any module.
- if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") {
- output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
- // TODO(matloob): command-line-arguments isn't correct here.
- "command-line-arguments", strings.Trim(stderr.String(), "\n"))
- return bytes.NewBufferString(output), nil
- }
-
- // Another variation of the previous error
- if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside module root") {
- output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
- // TODO(matloob): command-line-arguments isn't correct here.
- "command-line-arguments", strings.Trim(stderr.String(), "\n"))
- return bytes.NewBufferString(output), nil
- }
-
- // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit
- // status if there's a dependency on a package that doesn't exist. But it should return
- // a zero exit status and set an error on that package.
- if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") {
- // Don't clobber stdout if `go list` actually returned something.
- if len(stdout.String()) > 0 {
- return stdout, nil
- }
- // try to extract package name from string
- stderrStr := stderr.String()
- var importPath string
- colon := strings.Index(stderrStr, ":")
- if colon > 0 && strings.HasPrefix(stderrStr, "go build ") {
- importPath = stderrStr[len("go build "):colon]
- }
- output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`,
- importPath, strings.Trim(stderrStr, "\n"))
- return bytes.NewBufferString(output), nil
- }
-
- // Export mode entails a build.
- // If that build fails, errors appear on stderr
- // (despite the -e flag) and the Export field is blank.
- // Do not fail in that case.
- // The same is true if an ad-hoc package given to go list doesn't exist.
- // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when
- // packages don't exist or a build fails.
- if !usesExportData(cfg) && !containsGoFile(args) {
- return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr)
- }
- }
- return stdout, nil
-}
-
-func containsGoFile(s []string) bool {
- for _, f := range s {
- if strings.HasSuffix(f, ".go") {
- return true
- }
- }
- return false
-}
-
-func cmdDebugStr(cmd *exec.Cmd, args ...string) string {
- env := make(map[string]string)
- for _, kv := range cmd.Env {
- split := strings.Split(kv, "=")
- k, v := split[0], split[1]
- env[k] = v
- }
- var quotedArgs []string
- for _, arg := range args {
- quotedArgs = append(quotedArgs, strconv.Quote(arg))
- }
-
- return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " "))
-}
diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go
deleted file mode 100644
index 4eabfd98c63..00000000000
--- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go
+++ /dev/null
@@ -1,473 +0,0 @@
-package packages
-
-import (
- "encoding/json"
- "fmt"
- "go/parser"
- "go/token"
- "log"
- "os"
- "path/filepath"
- "sort"
- "strconv"
- "strings"
-)
-
-// processGolistOverlay provides rudimentary support for adding
-// files that don't exist on disk to an overlay. The results can be
-// sometimes incorrect.
-// TODO(matloob): Handle unsupported cases, including the following:
-// - determining the correct package to add given a new import path
-func (state *golistState) processGolistOverlay(response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) {
- havePkgs := make(map[string]string) // importPath -> non-test package ID
- needPkgsSet := make(map[string]bool)
- modifiedPkgsSet := make(map[string]bool)
-
- pkgOfDir := make(map[string][]*Package)
- for _, pkg := range response.dr.Packages {
- // This is an approximation of import path to id. This can be
- // wrong for tests, vendored packages, and a number of other cases.
- havePkgs[pkg.PkgPath] = pkg.ID
- x := commonDir(pkg.GoFiles)
- if x != "" {
- pkgOfDir[x] = append(pkgOfDir[x], pkg)
- }
- }
-
- // If no new imports are added, it is safe to avoid loading any needPkgs.
- // Otherwise, it's hard to tell which package is actually being loaded
- // (due to vendoring) and whether any modified package will show up
- // in the transitive set of dependencies (because new imports are added,
- // potentially modifying the transitive set of dependencies).
- var overlayAddsImports bool
-
- // If both a package and its test package are created by the overlay, we
- // need the real package first. Process all non-test files before test
- // files, and make the whole process deterministic while we're at it.
- var overlayFiles []string
- for opath := range state.cfg.Overlay {
- overlayFiles = append(overlayFiles, opath)
- }
- sort.Slice(overlayFiles, func(i, j int) bool {
- iTest := strings.HasSuffix(overlayFiles[i], "_test.go")
- jTest := strings.HasSuffix(overlayFiles[j], "_test.go")
- if iTest != jTest {
- return !iTest // non-tests are before tests.
- }
- return overlayFiles[i] < overlayFiles[j]
- })
- for _, opath := range overlayFiles {
- contents := state.cfg.Overlay[opath]
- base := filepath.Base(opath)
- dir := filepath.Dir(opath)
- var pkg *Package // if opath belongs to both a package and its test variant, this will be the test variant
- var testVariantOf *Package // if opath is a test file, this is the package it is testing
- var fileExists bool
- isTestFile := strings.HasSuffix(opath, "_test.go")
- pkgName, ok := extractPackageName(opath, contents)
- if !ok {
- // Don't bother adding a file that doesn't even have a parsable package statement
- // to the overlay.
- continue
- }
- // If all the overlay files belong to a different package, change the
- // package name to that package.
- maybeFixPackageName(pkgName, isTestFile, pkgOfDir[dir])
- nextPackage:
- for _, p := range response.dr.Packages {
- if pkgName != p.Name && p.ID != "command-line-arguments" {
- continue
- }
- for _, f := range p.GoFiles {
- if !sameFile(filepath.Dir(f), dir) {
- continue
- }
- // Make sure to capture information on the package's test variant, if needed.
- if isTestFile && !hasTestFiles(p) {
- // TODO(matloob): Are there packages other than the 'production' variant
- // of a package that this can match? This shouldn't match the test main package
- // because the file is generated in another directory.
- testVariantOf = p
- continue nextPackage
- }
- // We must have already seen the package of which this is a test variant.
- if pkg != nil && p != pkg && pkg.PkgPath == p.PkgPath {
- if hasTestFiles(p) {
- testVariantOf = pkg
- }
- }
- pkg = p
- if filepath.Base(f) == base {
- fileExists = true
- }
- }
- }
- // The overlay could have included an entirely new package or an
- // ad-hoc package. An ad-hoc package is one that we have manually
- // constructed from inadequate `go list` results for a file= query.
- // It will have the ID command-line-arguments.
- if pkg == nil || pkg.ID == "command-line-arguments" {
- // Try to find the module or gopath dir the file is contained in.
- // Then for modules, add the module opath to the beginning.
- pkgPath, ok, err := state.getPkgPath(dir)
- if err != nil {
- return nil, nil, err
- }
- if !ok {
- break
- }
- var forTest string // only set for x tests
- isXTest := strings.HasSuffix(pkgName, "_test")
- if isXTest {
- forTest = pkgPath
- pkgPath += "_test"
- }
- id := pkgPath
- if isTestFile {
- if isXTest {
- id = fmt.Sprintf("%s [%s.test]", pkgPath, forTest)
- } else {
- id = fmt.Sprintf("%s [%s.test]", pkgPath, pkgPath)
- }
- }
- if pkg != nil {
- // TODO(rstambler): We should change the package's path and ID
- // here. The only issue is that this messes with the roots.
- } else {
- // Try to reclaim a package with the same ID, if it exists in the response.
- for _, p := range response.dr.Packages {
- if reclaimPackage(p, id, opath, contents) {
- pkg = p
- break
- }
- }
- // Otherwise, create a new package.
- if pkg == nil {
- pkg = &Package{
- PkgPath: pkgPath,
- ID: id,
- Name: pkgName,
- Imports: make(map[string]*Package),
- }
- response.addPackage(pkg)
- havePkgs[pkg.PkgPath] = id
- // Add the production package's sources for a test variant.
- if isTestFile && !isXTest && testVariantOf != nil {
- pkg.GoFiles = append(pkg.GoFiles, testVariantOf.GoFiles...)
- pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, testVariantOf.CompiledGoFiles...)
- // Add the package under test and its imports to the test variant.
- pkg.forTest = testVariantOf.PkgPath
- for k, v := range testVariantOf.Imports {
- pkg.Imports[k] = &Package{ID: v.ID}
- }
- }
- if isXTest {
- pkg.forTest = forTest
- }
- }
- }
- }
- if !fileExists {
- pkg.GoFiles = append(pkg.GoFiles, opath)
- // TODO(matloob): Adding the file to CompiledGoFiles can exhibit the wrong behavior
- // if the file will be ignored due to its build tags.
- pkg.CompiledGoFiles = append(pkg.CompiledGoFiles, opath)
- modifiedPkgsSet[pkg.ID] = true
- }
- imports, err := extractImports(opath, contents)
- if err != nil {
- // Let the parser or type checker report errors later.
- continue
- }
- for _, imp := range imports {
- // TODO(rstambler): If the package is an x test and the import has
- // a test variant, make sure to replace it.
- if _, found := pkg.Imports[imp]; found {
- continue
- }
- overlayAddsImports = true
- id, ok := havePkgs[imp]
- if !ok {
- var err error
- id, err = state.resolveImport(dir, imp)
- if err != nil {
- return nil, nil, err
- }
- }
- pkg.Imports[imp] = &Package{ID: id}
- // Add dependencies to the non-test variant version of this package as well.
- if testVariantOf != nil {
- testVariantOf.Imports[imp] = &Package{ID: id}
- }
- }
- }
-
- // toPkgPath guesses the package path given the id.
- toPkgPath := func(sourceDir, id string) (string, error) {
- if i := strings.IndexByte(id, ' '); i >= 0 {
- return state.resolveImport(sourceDir, id[:i])
- }
- return state.resolveImport(sourceDir, id)
- }
-
- // Now that new packages have been created, do another pass to determine
- // the new set of missing packages.
- for _, pkg := range response.dr.Packages {
- for _, imp := range pkg.Imports {
- if len(pkg.GoFiles) == 0 {
- return nil, nil, fmt.Errorf("cannot resolve imports for package %q with no Go files", pkg.PkgPath)
- }
- pkgPath, err := toPkgPath(filepath.Dir(pkg.GoFiles[0]), imp.ID)
- if err != nil {
- return nil, nil, err
- }
- if _, ok := havePkgs[pkgPath]; !ok {
- needPkgsSet[pkgPath] = true
- }
- }
- }
-
- if overlayAddsImports {
- needPkgs = make([]string, 0, len(needPkgsSet))
- for pkg := range needPkgsSet {
- needPkgs = append(needPkgs, pkg)
- }
- }
- modifiedPkgs = make([]string, 0, len(modifiedPkgsSet))
- for pkg := range modifiedPkgsSet {
- modifiedPkgs = append(modifiedPkgs, pkg)
- }
- return modifiedPkgs, needPkgs, err
-}
-
-// resolveImport finds the the ID of a package given its import path.
-// In particular, it will find the right vendored copy when in GOPATH mode.
-func (state *golistState) resolveImport(sourceDir, importPath string) (string, error) {
- env, err := state.getEnv()
- if err != nil {
- return "", err
- }
- if env["GOMOD"] != "" {
- return importPath, nil
- }
-
- searchDir := sourceDir
- for {
- vendorDir := filepath.Join(searchDir, "vendor")
- exists, ok := state.vendorDirs[vendorDir]
- if !ok {
- info, err := os.Stat(vendorDir)
- exists = err == nil && info.IsDir()
- state.vendorDirs[vendorDir] = exists
- }
-
- if exists {
- vendoredPath := filepath.Join(vendorDir, importPath)
- if info, err := os.Stat(vendoredPath); err == nil && info.IsDir() {
- // We should probably check for .go files here, but shame on anyone who fools us.
- path, ok, err := state.getPkgPath(vendoredPath)
- if err != nil {
- return "", err
- }
- if ok {
- return path, nil
- }
- }
- }
-
- // We know we've hit the top of the filesystem when we Dir / and get /,
- // or C:\ and get C:\, etc.
- next := filepath.Dir(searchDir)
- if next == searchDir {
- break
- }
- searchDir = next
- }
- return importPath, nil
-}
-
-func hasTestFiles(p *Package) bool {
- for _, f := range p.GoFiles {
- if strings.HasSuffix(f, "_test.go") {
- return true
- }
- }
- return false
-}
-
-// determineRootDirs returns a mapping from absolute directories that could
-// contain code to their corresponding import path prefixes.
-func (state *golistState) determineRootDirs() (map[string]string, error) {
- env, err := state.getEnv()
- if err != nil {
- return nil, err
- }
- if env["GOMOD"] != "" {
- state.rootsOnce.Do(func() {
- state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
- })
- } else {
- state.rootsOnce.Do(func() {
- state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
- })
- }
- return state.rootDirs, state.rootDirsError
-}
-
-func (state *golistState) determineRootDirsModules() (map[string]string, error) {
- // This will only return the root directory for the main module.
- // For now we only support overlays in main modules.
- // Editing files in the module cache isn't a great idea, so we don't
- // plan to ever support that, but editing files in replaced modules
- // is something we may want to support. To do that, we'll want to
- // do a go list -m to determine the replaced module's module path and
- // directory, and then a go list -m {{with .Replace}}{{.Dir}}{{end}}
- // from the main module to determine if that module is actually a replacement.
- // See bcmills's comment here: https://github.com/golang/go/issues/37629#issuecomment-594179751
- // for more information.
- out, err := state.invokeGo("list", "-m", "-json")
- if err != nil {
- return nil, err
- }
- m := map[string]string{}
- type jsonMod struct{ Path, Dir string }
- for dec := json.NewDecoder(out); dec.More(); {
- mod := new(jsonMod)
- if err := dec.Decode(mod); err != nil {
- return nil, err
- }
- if mod.Dir != "" && mod.Path != "" {
- // This is a valid module; add it to the map.
- absDir, err := filepath.Abs(mod.Dir)
- if err != nil {
- return nil, err
- }
- m[absDir] = mod.Path
- }
- }
- return m, nil
-}
-
-func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
- m := map[string]string{}
- for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
- absDir, err := filepath.Abs(dir)
- if err != nil {
- return nil, err
- }
- m[filepath.Join(absDir, "src")] = ""
- }
- return m, nil
-}
-
-func extractImports(filename string, contents []byte) ([]string, error) {
- f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.ImportsOnly) // TODO(matloob): reuse fileset?
- if err != nil {
- return nil, err
- }
- var res []string
- for _, imp := range f.Imports {
- quotedPath := imp.Path.Value
- path, err := strconv.Unquote(quotedPath)
- if err != nil {
- return nil, err
- }
- res = append(res, path)
- }
- return res, nil
-}
-
-// reclaimPackage attempts to reuse a package that failed to load in an overlay.
-//
-// If the package has errors and has no Name, GoFiles, or Imports,
-// then it's possible that it doesn't yet exist on disk.
-func reclaimPackage(pkg *Package, id string, filename string, contents []byte) bool {
- // TODO(rstambler): Check the message of the actual error?
- // It differs between $GOPATH and module mode.
- if pkg.ID != id {
- return false
- }
- if len(pkg.Errors) != 1 {
- return false
- }
- if pkg.Name != "" || pkg.ExportFile != "" {
- return false
- }
- if len(pkg.GoFiles) > 0 || len(pkg.CompiledGoFiles) > 0 || len(pkg.OtherFiles) > 0 {
- return false
- }
- if len(pkg.Imports) > 0 {
- return false
- }
- pkgName, ok := extractPackageName(filename, contents)
- if !ok {
- return false
- }
- pkg.Name = pkgName
- pkg.Errors = nil
- return true
-}
-
-func extractPackageName(filename string, contents []byte) (string, bool) {
- // TODO(rstambler): Check the message of the actual error?
- // It differs between $GOPATH and module mode.
- f, err := parser.ParseFile(token.NewFileSet(), filename, contents, parser.PackageClauseOnly) // TODO(matloob): reuse fileset?
- if err != nil {
- return "", false
- }
- return f.Name.Name, true
-}
-
-func commonDir(a []string) string {
- seen := make(map[string]bool)
- x := append([]string{}, a...)
- for _, f := range x {
- seen[filepath.Dir(f)] = true
- }
- if len(seen) > 1 {
- log.Fatalf("commonDir saw %v for %v", seen, x)
- }
- for k := range seen {
- // len(seen) == 1
- return k
- }
- return "" // no files
-}
-
-// It is possible that the files in the disk directory dir have a different package
-// name from newName, which is deduced from the overlays. If they all have a different
-// package name, and they all have the same package name, then that name becomes
-// the package name.
-// It returns true if it changes the package name, false otherwise.
-func maybeFixPackageName(newName string, isTestFile bool, pkgsOfDir []*Package) {
- names := make(map[string]int)
- for _, p := range pkgsOfDir {
- names[p.Name]++
- }
- if len(names) != 1 {
- // some files are in different packages
- return
- }
- var oldName string
- for k := range names {
- oldName = k
- }
- if newName == oldName {
- return
- }
- // We might have a case where all of the package names in the directory are
- // the same, but the overlay file is for an x test, which belongs to its
- // own package. If the x test does not yet exist on disk, we may not yet
- // have its package name on disk, but we should not rename the packages.
- //
- // We use a heuristic to determine if this file belongs to an x test:
- // The test file should have a package name whose package name has a _test
- // suffix or looks like "newName_test".
- maybeXTest := strings.HasPrefix(oldName+"_test", newName) || strings.HasSuffix(newName, "_test")
- if isTestFile && maybeXTest {
- return
- }
- for _, p := range pkgsOfDir {
- p.Name = newName
- }
-}
diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
deleted file mode 100644
index 7ea37e7eeac..00000000000
--- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packages
-
-import (
- "fmt"
- "strings"
-)
-
-var allModes = []LoadMode{
- NeedName,
- NeedFiles,
- NeedCompiledGoFiles,
- NeedImports,
- NeedDeps,
- NeedExportsFile,
- NeedTypes,
- NeedSyntax,
- NeedTypesInfo,
- NeedTypesSizes,
-}
-
-var modeStrings = []string{
- "NeedName",
- "NeedFiles",
- "NeedCompiledGoFiles",
- "NeedImports",
- "NeedDeps",
- "NeedExportsFile",
- "NeedTypes",
- "NeedSyntax",
- "NeedTypesInfo",
- "NeedTypesSizes",
-}
-
-func (mod LoadMode) String() string {
- m := mod
- if m == 0 {
- return "LoadMode(0)"
- }
- var out []string
- for i, x := range allModes {
- if x > m {
- break
- }
- if (m & x) != 0 {
- out = append(out, modeStrings[i])
- m = m ^ x
- }
- }
- if m != 0 {
- out = append(out, "Unknown")
- }
- return fmt.Sprintf("LoadMode(%s)", strings.Join(out, "|"))
-}
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
deleted file mode 100644
index 04053f1e7d4..00000000000
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ /dev/null
@@ -1,1212 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package packages
-
-// See doc.go for package documentation and implementation notes.
-
-import (
- "context"
- "encoding/json"
- "fmt"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "go/types"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "strings"
- "sync"
- "time"
-
- "golang.org/x/tools/go/gcexportdata"
- "golang.org/x/tools/internal/gocommand"
- "golang.org/x/tools/internal/packagesinternal"
- "golang.org/x/tools/internal/typesinternal"
-)
-
-// A LoadMode controls the amount of detail to return when loading.
-// The bits below can be combined to specify which fields should be
-// filled in the result packages.
-// The zero value is a special case, equivalent to combining
-// the NeedName, NeedFiles, and NeedCompiledGoFiles bits.
-// ID and Errors (if present) will always be filled.
-// Load may return more information than requested.
-type LoadMode int
-
-// TODO(matloob): When a V2 of go/packages is released, rename NeedExportsFile to
-// NeedExportFile to make it consistent with the Package field it's adding.
-
-const (
- // NeedName adds Name and PkgPath.
- NeedName LoadMode = 1 << iota
-
- // NeedFiles adds GoFiles and OtherFiles.
- NeedFiles
-
- // NeedCompiledGoFiles adds CompiledGoFiles.
- NeedCompiledGoFiles
-
- // NeedImports adds Imports. If NeedDeps is not set, the Imports field will contain
- // "placeholder" Packages with only the ID set.
- NeedImports
-
- // NeedDeps adds the fields requested by the LoadMode in the packages in Imports.
- NeedDeps
-
- // NeedExportsFile adds ExportFile.
- NeedExportsFile
-
- // NeedTypes adds Types, Fset, and IllTyped.
- NeedTypes
-
- // NeedSyntax adds Syntax.
- NeedSyntax
-
- // NeedTypesInfo adds TypesInfo.
- NeedTypesInfo
-
- // NeedTypesSizes adds TypesSizes.
- NeedTypesSizes
-
- // typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
- // Modifies CompiledGoFiles and Types, and has no effect on its own.
- typecheckCgo
-
- // NeedModule adds Module.
- NeedModule
-)
-
-const (
- // Deprecated: LoadFiles exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
- LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
-
- // Deprecated: LoadImports exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
- LoadImports = LoadFiles | NeedImports
-
- // Deprecated: LoadTypes exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
- LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
-
- // Deprecated: LoadSyntax exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
- LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
-
- // Deprecated: LoadAllSyntax exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
- LoadAllSyntax = LoadSyntax | NeedDeps
-)
-
-// A Config specifies details about how packages should be loaded.
-// The zero value is a valid configuration.
-// Calls to Load do not modify this struct.
-type Config struct {
- // Mode controls the level of information returned for each package.
- Mode LoadMode
-
- // Context specifies the context for the load operation.
- // If the context is cancelled, the loader may stop early
- // and return an ErrCancelled error.
- // If Context is nil, the load cannot be cancelled.
- Context context.Context
-
- // Logf is the logger for the config.
- // If the user provides a logger, debug logging is enabled.
- // If the GOPACKAGESDEBUG environment variable is set to true,
- // but the logger is nil, default to log.Printf.
- Logf func(format string, args ...interface{})
-
- // Dir is the directory in which to run the build system's query tool
- // that provides information about the packages.
- // If Dir is empty, the tool is run in the current directory.
- Dir string
-
- // Env is the environment to use when invoking the build system's query tool.
- // If Env is nil, the current environment is used.
- // As in os/exec's Cmd, only the last value in the slice for
- // each environment key is used. To specify the setting of only
- // a few variables, append to the current environment, as in:
- //
- // opt.Env = append(os.Environ(), "GOOS=plan9", "GOARCH=386")
- //
- Env []string
-
- // gocmdRunner guards go command calls from concurrency errors.
- gocmdRunner *gocommand.Runner
-
- // BuildFlags is a list of command-line flags to be passed through to
- // the build system's query tool.
- BuildFlags []string
-
- // Fset provides source position information for syntax trees and types.
- // If Fset is nil, Load will use a new fileset, but preserve Fset's value.
- Fset *token.FileSet
-
- // ParseFile is called to read and parse each file
- // when preparing a package's type-checked syntax tree.
- // It must be safe to call ParseFile simultaneously from multiple goroutines.
- // If ParseFile is nil, the loader will uses parser.ParseFile.
- //
- // ParseFile should parse the source from src and use filename only for
- // recording position information.
- //
- // An application may supply a custom implementation of ParseFile
- // to change the effective file contents or the behavior of the parser,
- // or to modify the syntax tree. For example, selectively eliminating
- // unwanted function bodies can significantly accelerate type checking.
- ParseFile func(fset *token.FileSet, filename string, src []byte) (*ast.File, error)
-
- // If Tests is set, the loader includes not just the packages
- // matching a particular pattern but also any related test packages,
- // including test-only variants of the package and the test executable.
- //
- // For example, when using the go command, loading "fmt" with Tests=true
- // returns four packages, with IDs "fmt" (the standard package),
- // "fmt [fmt.test]" (the package as compiled for the test),
- // "fmt_test" (the test functions from source files in package fmt_test),
- // and "fmt.test" (the test binary).
- //
- // In build systems with explicit names for tests,
- // setting Tests may have no effect.
- Tests bool
-
- // Overlay provides a mapping of absolute file paths to file contents.
- // If the file with the given path already exists, the parser will use the
- // alternative file contents provided by the map.
- //
- // Overlays provide incomplete support for when a given file doesn't
- // already exist on disk. See the package doc above for more details.
- Overlay map[string][]byte
-}
-
-// driver is the type for functions that query the build system for the
-// packages named by the patterns.
-type driver func(cfg *Config, patterns ...string) (*driverResponse, error)
-
-// driverResponse contains the results for a driver query.
-type driverResponse struct {
- // NotHandled is returned if the request can't be handled by the current
- // driver. If an external driver returns a response with NotHandled, the
- // rest of the driverResponse is ignored, and go/packages will fallback
- // to the next driver. If go/packages is extended in the future to support
- // lists of multiple drivers, go/packages will fall back to the next driver.
- NotHandled bool
-
- // Sizes, if not nil, is the types.Sizes to use when type checking.
- Sizes *types.StdSizes
-
- // Roots is the set of package IDs that make up the root packages.
- // We have to encode this separately because when we encode a single package
- // we cannot know if it is one of the roots as that requires knowledge of the
- // graph it is part of.
- Roots []string `json:",omitempty"`
-
- // Packages is the full set of packages in the graph.
- // The packages are not connected into a graph.
- // The Imports if populated will be stubs that only have their ID set.
- // Imports will be connected and then type and syntax information added in a
- // later pass (see refine).
- Packages []*Package
-}
-
-// Load loads and returns the Go packages named by the given patterns.
-//
-// Config specifies loading options;
-// nil behaves the same as an empty Config.
-//
-// Load returns an error if any of the patterns was invalid
-// as defined by the underlying build system.
-// It may return an empty list of packages without an error,
-// for instance for an empty expansion of a valid wildcard.
-// Errors associated with a particular package are recorded in the
-// corresponding Package's Errors list, and do not cause Load to
-// return an error. Clients may need to handle such errors before
-// proceeding with further analysis. The PrintErrors function is
-// provided for convenient display of all errors.
-func Load(cfg *Config, patterns ...string) ([]*Package, error) {
- l := newLoader(cfg)
- response, err := defaultDriver(&l.Config, patterns...)
- if err != nil {
- return nil, err
- }
- l.sizes = response.Sizes
- return l.refine(response.Roots, response.Packages...)
-}
-
-// defaultDriver is a driver that implements go/packages' fallback behavior.
-// It will try to request to an external driver, if one exists. If there's
-// no external driver, or the driver returns a response with NotHandled set,
-// defaultDriver will fall back to the go list driver.
-func defaultDriver(cfg *Config, patterns ...string) (*driverResponse, error) {
- driver := findExternalDriver(cfg)
- if driver == nil {
- driver = goListDriver
- }
- response, err := driver(cfg, patterns...)
- if err != nil {
- return response, err
- } else if response.NotHandled {
- return goListDriver(cfg, patterns...)
- }
- return response, nil
-}
-
-// A Package describes a loaded Go package.
-type Package struct {
- // ID is a unique identifier for a package,
- // in a syntax provided by the underlying build system.
- //
- // Because the syntax varies based on the build system,
- // clients should treat IDs as opaque and not attempt to
- // interpret them.
- ID string
-
- // Name is the package name as it appears in the package source code.
- Name string
-
- // PkgPath is the package path as used by the go/types package.
- PkgPath string
-
- // Errors contains any errors encountered querying the metadata
- // of the package, or while parsing or type-checking its files.
- Errors []Error
-
- // GoFiles lists the absolute file paths of the package's Go source files.
- GoFiles []string
-
- // CompiledGoFiles lists the absolute file paths of the package's source
- // files that are suitable for type checking.
- // This may differ from GoFiles if files are processed before compilation.
- CompiledGoFiles []string
-
- // OtherFiles lists the absolute file paths of the package's non-Go source files,
- // including assembly, C, C++, Fortran, Objective-C, SWIG, and so on.
- OtherFiles []string
-
- // ExportFile is the absolute path to a file containing type
- // information for the package as provided by the build system.
- ExportFile string
-
- // Imports maps import paths appearing in the package's Go source files
- // to corresponding loaded Packages.
- Imports map[string]*Package
-
- // Types provides type information for the package.
- // The NeedTypes LoadMode bit sets this field for packages matching the
- // patterns; type information for dependencies may be missing or incomplete,
- // unless NeedDeps and NeedImports are also set.
- Types *types.Package
-
- // Fset provides position information for Types, TypesInfo, and Syntax.
- // It is set only when Types is set.
- Fset *token.FileSet
-
- // IllTyped indicates whether the package or any dependency contains errors.
- // It is set only when Types is set.
- IllTyped bool
-
- // Syntax is the package's syntax trees, for the files listed in CompiledGoFiles.
- //
- // The NeedSyntax LoadMode bit populates this field for packages matching the patterns.
- // If NeedDeps and NeedImports are also set, this field will also be populated
- // for dependencies.
- Syntax []*ast.File
-
- // TypesInfo provides type information about the package's syntax trees.
- // It is set only when Syntax is set.
- TypesInfo *types.Info
-
- // TypesSizes provides the effective size function for types in TypesInfo.
- TypesSizes types.Sizes
-
- // forTest is the package under test, if any.
- forTest string
-
- // module is the module information for the package if it exists.
- Module *Module
-}
-
-// Module provides module information for a package.
-type Module struct {
- Path string // module path
- Version string // module version
- Replace *Module // replaced by this module
- Time *time.Time // time version was created
- Main bool // is this the main module?
- Indirect bool // is this module only an indirect dependency of main module?
- Dir string // directory holding files for this module, if any
- GoMod string // path to go.mod file used when loading this module, if any
- GoVersion string // go version used in module
- Error *ModuleError // error loading module
-}
-
-// ModuleError holds errors loading a module.
-type ModuleError struct {
- Err string // the error itself
-}
-
-func init() {
- packagesinternal.GetForTest = func(p interface{}) string {
- return p.(*Package).forTest
- }
- packagesinternal.GetGoCmdRunner = func(config interface{}) *gocommand.Runner {
- return config.(*Config).gocmdRunner
- }
- packagesinternal.SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {
- config.(*Config).gocmdRunner = runner
- }
- packagesinternal.TypecheckCgo = int(typecheckCgo)
-}
-
-// An Error describes a problem with a package's metadata, syntax, or types.
-type Error struct {
- Pos string // "file:line:col" or "file:line" or "" or "-"
- Msg string
- Kind ErrorKind
-}
-
-// ErrorKind describes the source of the error, allowing the user to
-// differentiate between errors generated by the driver, the parser, or the
-// type-checker.
-type ErrorKind int
-
-const (
- UnknownError ErrorKind = iota
- ListError
- ParseError
- TypeError
-)
-
-func (err Error) Error() string {
- pos := err.Pos
- if pos == "" {
- pos = "-" // like token.Position{}.String()
- }
- return pos + ": " + err.Msg
-}
-
-// flatPackage is the JSON form of Package
-// It drops all the type and syntax fields, and transforms the Imports
-//
-// TODO(adonovan): identify this struct with Package, effectively
-// publishing the JSON protocol.
-type flatPackage struct {
- ID string
- Name string `json:",omitempty"`
- PkgPath string `json:",omitempty"`
- Errors []Error `json:",omitempty"`
- GoFiles []string `json:",omitempty"`
- CompiledGoFiles []string `json:",omitempty"`
- OtherFiles []string `json:",omitempty"`
- ExportFile string `json:",omitempty"`
- Imports map[string]string `json:",omitempty"`
-}
-
-// MarshalJSON returns the Package in its JSON form.
-// For the most part, the structure fields are written out unmodified, and
-// the type and syntax fields are skipped.
-// The imports are written out as just a map of path to package id.
-// The errors are written using a custom type that tries to preserve the
-// structure of error types we know about.
-//
-// This method exists to enable support for additional build systems. It is
-// not intended for use by clients of the API and we may change the format.
-func (p *Package) MarshalJSON() ([]byte, error) {
- flat := &flatPackage{
- ID: p.ID,
- Name: p.Name,
- PkgPath: p.PkgPath,
- Errors: p.Errors,
- GoFiles: p.GoFiles,
- CompiledGoFiles: p.CompiledGoFiles,
- OtherFiles: p.OtherFiles,
- ExportFile: p.ExportFile,
- }
- if len(p.Imports) > 0 {
- flat.Imports = make(map[string]string, len(p.Imports))
- for path, ipkg := range p.Imports {
- flat.Imports[path] = ipkg.ID
- }
- }
- return json.Marshal(flat)
-}
-
-// UnmarshalJSON reads in a Package from its JSON format.
-// See MarshalJSON for details about the format accepted.
-func (p *Package) UnmarshalJSON(b []byte) error {
- flat := &flatPackage{}
- if err := json.Unmarshal(b, &flat); err != nil {
- return err
- }
- *p = Package{
- ID: flat.ID,
- Name: flat.Name,
- PkgPath: flat.PkgPath,
- Errors: flat.Errors,
- GoFiles: flat.GoFiles,
- CompiledGoFiles: flat.CompiledGoFiles,
- OtherFiles: flat.OtherFiles,
- ExportFile: flat.ExportFile,
- }
- if len(flat.Imports) > 0 {
- p.Imports = make(map[string]*Package, len(flat.Imports))
- for path, id := range flat.Imports {
- p.Imports[path] = &Package{ID: id}
- }
- }
- return nil
-}
-
-func (p *Package) String() string { return p.ID }
-
-// loaderPackage augments Package with state used during the loading phase
-type loaderPackage struct {
- *Package
- importErrors map[string]error // maps each bad import to its error
- loadOnce sync.Once
- color uint8 // for cycle detection
- needsrc bool // load from source (Mode >= LoadTypes)
- needtypes bool // type information is either requested or depended on
- initial bool // package was matched by a pattern
-}
-
-// loader holds the working state of a single call to load.
-type loader struct {
- pkgs map[string]*loaderPackage
- Config
- sizes types.Sizes
- parseCache map[string]*parseValue
- parseCacheMu sync.Mutex
- exportMu sync.Mutex // enforces mutual exclusion of exportdata operations
-
- // Config.Mode contains the implied mode (see impliedLoadMode).
- // Implied mode contains all the fields we need the data for.
- // In requestedMode there are the actually requested fields.
- // We'll zero them out before returning packages to the user.
- // This makes it easier for us to get the conditions where
- // we need certain modes right.
- requestedMode LoadMode
-}
-
-type parseValue struct {
- f *ast.File
- err error
- ready chan struct{}
-}
-
-func newLoader(cfg *Config) *loader {
- ld := &loader{
- parseCache: map[string]*parseValue{},
- }
- if cfg != nil {
- ld.Config = *cfg
- // If the user has provided a logger, use it.
- ld.Config.Logf = cfg.Logf
- }
- if ld.Config.Logf == nil {
- // If the GOPACKAGESDEBUG environment variable is set to true,
- // but the user has not provided a logger, default to log.Printf.
- if debug {
- ld.Config.Logf = log.Printf
- } else {
- ld.Config.Logf = func(format string, args ...interface{}) {}
- }
- }
- if ld.Config.Mode == 0 {
- ld.Config.Mode = NeedName | NeedFiles | NeedCompiledGoFiles // Preserve zero behavior of Mode for backwards compatibility.
- }
- if ld.Config.Env == nil {
- ld.Config.Env = os.Environ()
- }
- if ld.Config.gocmdRunner == nil {
- ld.Config.gocmdRunner = &gocommand.Runner{}
- }
- if ld.Context == nil {
- ld.Context = context.Background()
- }
- if ld.Dir == "" {
- if dir, err := os.Getwd(); err == nil {
- ld.Dir = dir
- }
- }
-
- // Save the actually requested fields. We'll zero them out before returning packages to the user.
- ld.requestedMode = ld.Mode
- ld.Mode = impliedLoadMode(ld.Mode)
-
- if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
- if ld.Fset == nil {
- ld.Fset = token.NewFileSet()
- }
-
- // ParseFile is required even in LoadTypes mode
- // because we load source if export data is missing.
- if ld.ParseFile == nil {
- ld.ParseFile = func(fset *token.FileSet, filename string, src []byte) (*ast.File, error) {
- const mode = parser.AllErrors | parser.ParseComments
- return parser.ParseFile(fset, filename, src, mode)
- }
- }
- }
-
- return ld
-}
-
-// refine connects the supplied packages into a graph and then adds type and
-// and syntax information as requested by the LoadMode.
-func (ld *loader) refine(roots []string, list ...*Package) ([]*Package, error) {
- rootMap := make(map[string]int, len(roots))
- for i, root := range roots {
- rootMap[root] = i
- }
- ld.pkgs = make(map[string]*loaderPackage)
- // first pass, fixup and build the map and roots
- var initial = make([]*loaderPackage, len(roots))
- for _, pkg := range list {
- rootIndex := -1
- if i, found := rootMap[pkg.ID]; found {
- rootIndex = i
- }
-
- // Overlays can invalidate export data.
- // TODO(matloob): make this check fine-grained based on dependencies on overlaid files
- exportDataInvalid := len(ld.Overlay) > 0 || pkg.ExportFile == "" && pkg.PkgPath != "unsafe"
- // This package needs type information if the caller requested types and the package is
- // either a root, or it's a non-root and the user requested dependencies ...
- needtypes := (ld.Mode&NeedTypes|NeedTypesInfo != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0))
- // This package needs source if the call requested source (or types info, which implies source)
- // and the package is either a root, or itas a non- root and the user requested dependencies...
- needsrc := ((ld.Mode&(NeedSyntax|NeedTypesInfo) != 0 && (rootIndex >= 0 || ld.Mode&NeedDeps != 0)) ||
- // ... or if we need types and the exportData is invalid. We fall back to (incompletely)
- // typechecking packages from source if they fail to compile.
- (ld.Mode&NeedTypes|NeedTypesInfo != 0 && exportDataInvalid)) && pkg.PkgPath != "unsafe"
- lpkg := &loaderPackage{
- Package: pkg,
- needtypes: needtypes,
- needsrc: needsrc,
- }
- ld.pkgs[lpkg.ID] = lpkg
- if rootIndex >= 0 {
- initial[rootIndex] = lpkg
- lpkg.initial = true
- }
- }
- for i, root := range roots {
- if initial[i] == nil {
- return nil, fmt.Errorf("root package %v is missing", root)
- }
- }
-
- // Materialize the import graph.
-
- const (
- white = 0 // new
- grey = 1 // in progress
- black = 2 // complete
- )
-
- // visit traverses the import graph, depth-first,
- // and materializes the graph as Packages.Imports.
- //
- // Valid imports are saved in the Packages.Import map.
- // Invalid imports (cycles and missing nodes) are saved in the importErrors map.
- // Thus, even in the presence of both kinds of errors, the Import graph remains a DAG.
- //
- // visit returns whether the package needs src or has a transitive
- // dependency on a package that does. These are the only packages
- // for which we load source code.
- var stack []*loaderPackage
- var visit func(lpkg *loaderPackage) bool
- var srcPkgs []*loaderPackage
- visit = func(lpkg *loaderPackage) bool {
- switch lpkg.color {
- case black:
- return lpkg.needsrc
- case grey:
- panic("internal error: grey node")
- }
- lpkg.color = grey
- stack = append(stack, lpkg) // push
- stubs := lpkg.Imports // the structure form has only stubs with the ID in the Imports
- // If NeedImports isn't set, the imports fields will all be zeroed out.
- if ld.Mode&NeedImports != 0 {
- lpkg.Imports = make(map[string]*Package, len(stubs))
- for importPath, ipkg := range stubs {
- var importErr error
- imp := ld.pkgs[ipkg.ID]
- if imp == nil {
- // (includes package "C" when DisableCgo)
- importErr = fmt.Errorf("missing package: %q", ipkg.ID)
- } else if imp.color == grey {
- importErr = fmt.Errorf("import cycle: %s", stack)
- }
- if importErr != nil {
- if lpkg.importErrors == nil {
- lpkg.importErrors = make(map[string]error)
- }
- lpkg.importErrors[importPath] = importErr
- continue
- }
-
- if visit(imp) {
- lpkg.needsrc = true
- }
- lpkg.Imports[importPath] = imp.Package
- }
- }
- if lpkg.needsrc {
- srcPkgs = append(srcPkgs, lpkg)
- }
- if ld.Mode&NeedTypesSizes != 0 {
- lpkg.TypesSizes = ld.sizes
- }
- stack = stack[:len(stack)-1] // pop
- lpkg.color = black
-
- return lpkg.needsrc
- }
-
- if ld.Mode&NeedImports == 0 {
- // We do this to drop the stub import packages that we are not even going to try to resolve.
- for _, lpkg := range initial {
- lpkg.Imports = nil
- }
- } else {
- // For each initial package, create its import DAG.
- for _, lpkg := range initial {
- visit(lpkg)
- }
- }
- if ld.Mode&NeedImports != 0 && ld.Mode&NeedTypes != 0 {
- for _, lpkg := range srcPkgs {
- // Complete type information is required for the
- // immediate dependencies of each source package.
- for _, ipkg := range lpkg.Imports {
- imp := ld.pkgs[ipkg.ID]
- imp.needtypes = true
- }
- }
- }
- // Load type data and syntax if needed, starting at
- // the initial packages (roots of the import DAG).
- if ld.Mode&NeedTypes != 0 || ld.Mode&NeedSyntax != 0 {
- var wg sync.WaitGroup
- for _, lpkg := range initial {
- wg.Add(1)
- go func(lpkg *loaderPackage) {
- ld.loadRecursive(lpkg)
- wg.Done()
- }(lpkg)
- }
- wg.Wait()
- }
-
- result := make([]*Package, len(initial))
- for i, lpkg := range initial {
- result[i] = lpkg.Package
- }
- for i := range ld.pkgs {
- // Clear all unrequested fields, for extra de-Hyrum-ization.
- if ld.requestedMode&NeedName == 0 {
- ld.pkgs[i].Name = ""
- ld.pkgs[i].PkgPath = ""
- }
- if ld.requestedMode&NeedFiles == 0 {
- ld.pkgs[i].GoFiles = nil
- ld.pkgs[i].OtherFiles = nil
- }
- if ld.requestedMode&NeedCompiledGoFiles == 0 {
- ld.pkgs[i].CompiledGoFiles = nil
- }
- if ld.requestedMode&NeedImports == 0 {
- ld.pkgs[i].Imports = nil
- }
- if ld.requestedMode&NeedExportsFile == 0 {
- ld.pkgs[i].ExportFile = ""
- }
- if ld.requestedMode&NeedTypes == 0 {
- ld.pkgs[i].Types = nil
- ld.pkgs[i].Fset = nil
- ld.pkgs[i].IllTyped = false
- }
- if ld.requestedMode&NeedSyntax == 0 {
- ld.pkgs[i].Syntax = nil
- }
- if ld.requestedMode&NeedTypesInfo == 0 {
- ld.pkgs[i].TypesInfo = nil
- }
- if ld.requestedMode&NeedTypesSizes == 0 {
- ld.pkgs[i].TypesSizes = nil
- }
- if ld.requestedMode&NeedModule == 0 {
- ld.pkgs[i].Module = nil
- }
- }
-
- return result, nil
-}
-
-// loadRecursive loads the specified package and its dependencies,
-// recursively, in parallel, in topological order.
-// It is atomic and idempotent.
-// Precondition: ld.Mode&NeedTypes.
-func (ld *loader) loadRecursive(lpkg *loaderPackage) {
- lpkg.loadOnce.Do(func() {
- // Load the direct dependencies, in parallel.
- var wg sync.WaitGroup
- for _, ipkg := range lpkg.Imports {
- imp := ld.pkgs[ipkg.ID]
- wg.Add(1)
- go func(imp *loaderPackage) {
- ld.loadRecursive(imp)
- wg.Done()
- }(imp)
- }
- wg.Wait()
- ld.loadPackage(lpkg)
- })
-}
-
-// loadPackage loads the specified package.
-// It must be called only once per Package,
-// after immediate dependencies are loaded.
-// Precondition: ld.Mode & NeedTypes.
-func (ld *loader) loadPackage(lpkg *loaderPackage) {
- if lpkg.PkgPath == "unsafe" {
- // Fill in the blanks to avoid surprises.
- lpkg.Types = types.Unsafe
- lpkg.Fset = ld.Fset
- lpkg.Syntax = []*ast.File{}
- lpkg.TypesInfo = new(types.Info)
- lpkg.TypesSizes = ld.sizes
- return
- }
-
- // Call NewPackage directly with explicit name.
- // This avoids skew between golist and go/types when the files'
- // package declarations are inconsistent.
- lpkg.Types = types.NewPackage(lpkg.PkgPath, lpkg.Name)
- lpkg.Fset = ld.Fset
-
- // Subtle: we populate all Types fields with an empty Package
- // before loading export data so that export data processing
- // never has to create a types.Package for an indirect dependency,
- // which would then require that such created packages be explicitly
- // inserted back into the Import graph as a final step after export data loading.
- // The Diamond test exercises this case.
- if !lpkg.needtypes && !lpkg.needsrc {
- return
- }
- if !lpkg.needsrc {
- ld.loadFromExportData(lpkg)
- return // not a source package, don't get syntax trees
- }
-
- appendError := func(err error) {
- // Convert various error types into the one true Error.
- var errs []Error
- switch err := err.(type) {
- case Error:
- // from driver
- errs = append(errs, err)
-
- case *os.PathError:
- // from parser
- errs = append(errs, Error{
- Pos: err.Path + ":1",
- Msg: err.Err.Error(),
- Kind: ParseError,
- })
-
- case scanner.ErrorList:
- // from parser
- for _, err := range err {
- errs = append(errs, Error{
- Pos: err.Pos.String(),
- Msg: err.Msg,
- Kind: ParseError,
- })
- }
-
- case types.Error:
- // from type checker
- errs = append(errs, Error{
- Pos: err.Fset.Position(err.Pos).String(),
- Msg: err.Msg,
- Kind: TypeError,
- })
-
- default:
- // unexpected impoverished error from parser?
- errs = append(errs, Error{
- Pos: "-",
- Msg: err.Error(),
- Kind: UnknownError,
- })
-
- // If you see this error message, please file a bug.
- log.Printf("internal error: error %q (%T) without position", err, err)
- }
-
- lpkg.Errors = append(lpkg.Errors, errs...)
- }
-
- if ld.Config.Mode&NeedTypes != 0 && len(lpkg.CompiledGoFiles) == 0 && lpkg.ExportFile != "" {
- // The config requested loading sources and types, but sources are missing.
- // Add an error to the package and fall back to loading from export data.
- appendError(Error{"-", fmt.Sprintf("sources missing for package %s", lpkg.ID), ParseError})
- ld.loadFromExportData(lpkg)
- return // can't get syntax trees for this package
- }
-
- files, errs := ld.parseFiles(lpkg.CompiledGoFiles)
- for _, err := range errs {
- appendError(err)
- }
-
- lpkg.Syntax = files
- if ld.Config.Mode&NeedTypes == 0 {
- return
- }
-
- lpkg.TypesInfo = &types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Defs: make(map[*ast.Ident]types.Object),
- Uses: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Scopes: make(map[ast.Node]*types.Scope),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- }
- lpkg.TypesSizes = ld.sizes
-
- importer := importerFunc(func(path string) (*types.Package, error) {
- if path == "unsafe" {
- return types.Unsafe, nil
- }
-
- // The imports map is keyed by import path.
- ipkg := lpkg.Imports[path]
- if ipkg == nil {
- if err := lpkg.importErrors[path]; err != nil {
- return nil, err
- }
- // There was skew between the metadata and the
- // import declarations, likely due to an edit
- // race, or because the ParseFile feature was
- // used to supply alternative file contents.
- return nil, fmt.Errorf("no metadata for %s", path)
- }
-
- if ipkg.Types != nil && ipkg.Types.Complete() {
- return ipkg.Types, nil
- }
- log.Fatalf("internal error: package %q without types was imported from %q", path, lpkg)
- panic("unreachable")
- })
-
- // type-check
- tc := &types.Config{
- Importer: importer,
-
- // Type-check bodies of functions only in non-initial packages.
- // Example: for import graph A->B->C and initial packages {A,C},
- // we can ignore function bodies in B.
- IgnoreFuncBodies: ld.Mode&NeedDeps == 0 && !lpkg.initial,
-
- Error: appendError,
- Sizes: ld.sizes,
- }
- if (ld.Mode & typecheckCgo) != 0 {
- if !typesinternal.SetUsesCgo(tc) {
- appendError(Error{
- Msg: "typecheckCgo requires Go 1.15+",
- Kind: ListError,
- })
- return
- }
- }
- types.NewChecker(tc, ld.Fset, lpkg.Types, lpkg.TypesInfo).Files(lpkg.Syntax)
-
- lpkg.importErrors = nil // no longer needed
-
- // If !Cgo, the type-checker uses FakeImportC mode, so
- // it doesn't invoke the importer for import "C",
- // nor report an error for the import,
- // or for any undefined C.f reference.
- // We must detect this explicitly and correctly
- // mark the package as IllTyped (by reporting an error).
- // TODO(adonovan): if these errors are annoying,
- // we could just set IllTyped quietly.
- if tc.FakeImportC {
- outer:
- for _, f := range lpkg.Syntax {
- for _, imp := range f.Imports {
- if imp.Path.Value == `"C"` {
- err := types.Error{Fset: ld.Fset, Pos: imp.Pos(), Msg: `import "C" ignored`}
- appendError(err)
- break outer
- }
- }
- }
- }
-
- // Record accumulated errors.
- illTyped := len(lpkg.Errors) > 0
- if !illTyped {
- for _, imp := range lpkg.Imports {
- if imp.IllTyped {
- illTyped = true
- break
- }
- }
- }
- lpkg.IllTyped = illTyped
-}
-
-// An importFunc is an implementation of the single-method
-// types.Importer interface based on a function value.
-type importerFunc func(path string) (*types.Package, error)
-
-func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
-
-// We use a counting semaphore to limit
-// the number of parallel I/O calls per process.
-var ioLimit = make(chan bool, 20)
-
-func (ld *loader) parseFile(filename string) (*ast.File, error) {
- ld.parseCacheMu.Lock()
- v, ok := ld.parseCache[filename]
- if ok {
- // cache hit
- ld.parseCacheMu.Unlock()
- <-v.ready
- } else {
- // cache miss
- v = &parseValue{ready: make(chan struct{})}
- ld.parseCache[filename] = v
- ld.parseCacheMu.Unlock()
-
- var src []byte
- for f, contents := range ld.Config.Overlay {
- if sameFile(f, filename) {
- src = contents
- }
- }
- var err error
- if src == nil {
- ioLimit <- true // wait
- src, err = ioutil.ReadFile(filename)
- <-ioLimit // signal
- }
- if err != nil {
- v.err = err
- } else {
- v.f, v.err = ld.ParseFile(ld.Fset, filename, src)
- }
-
- close(v.ready)
- }
- return v.f, v.err
-}
-
-// parseFiles reads and parses the Go source files and returns the ASTs
-// of the ones that could be at least partially parsed, along with a
-// list of I/O and parse errors encountered.
-//
-// Because files are scanned in parallel, the token.Pos
-// positions of the resulting ast.Files are not ordered.
-//
-func (ld *loader) parseFiles(filenames []string) ([]*ast.File, []error) {
- var wg sync.WaitGroup
- n := len(filenames)
- parsed := make([]*ast.File, n)
- errors := make([]error, n)
- for i, file := range filenames {
- if ld.Config.Context.Err() != nil {
- parsed[i] = nil
- errors[i] = ld.Config.Context.Err()
- continue
- }
- wg.Add(1)
- go func(i int, filename string) {
- parsed[i], errors[i] = ld.parseFile(filename)
- wg.Done()
- }(i, file)
- }
- wg.Wait()
-
- // Eliminate nils, preserving order.
- var o int
- for _, f := range parsed {
- if f != nil {
- parsed[o] = f
- o++
- }
- }
- parsed = parsed[:o]
-
- o = 0
- for _, err := range errors {
- if err != nil {
- errors[o] = err
- o++
- }
- }
- errors = errors[:o]
-
- return parsed, errors
-}
-
-// sameFile returns true if x and y have the same basename and denote
-// the same file.
-//
-func sameFile(x, y string) bool {
- if x == y {
- // It could be the case that y doesn't exist.
- // For instance, it may be an overlay file that
- // hasn't been written to disk. To handle that case
- // let x == y through. (We added the exact absolute path
- // string to the CompiledGoFiles list, so the unwritten
- // overlay case implies x==y.)
- return true
- }
- if strings.EqualFold(filepath.Base(x), filepath.Base(y)) { // (optimisation)
- if xi, err := os.Stat(x); err == nil {
- if yi, err := os.Stat(y); err == nil {
- return os.SameFile(xi, yi)
- }
- }
- }
- return false
-}
-
-// loadFromExportData returns type information for the specified
-// package, loading it from an export data file on the first request.
-func (ld *loader) loadFromExportData(lpkg *loaderPackage) (*types.Package, error) {
- if lpkg.PkgPath == "" {
- log.Fatalf("internal error: Package %s has no PkgPath", lpkg)
- }
-
- // Because gcexportdata.Read has the potential to create or
- // modify the types.Package for each node in the transitive
- // closure of dependencies of lpkg, all exportdata operations
- // must be sequential. (Finer-grained locking would require
- // changes to the gcexportdata API.)
- //
- // The exportMu lock guards the Package.Pkg field and the
- // types.Package it points to, for each Package in the graph.
- //
- // Not all accesses to Package.Pkg need to be protected by exportMu:
- // graph ordering ensures that direct dependencies of source
- // packages are fully loaded before the importer reads their Pkg field.
- ld.exportMu.Lock()
- defer ld.exportMu.Unlock()
-
- if tpkg := lpkg.Types; tpkg != nil && tpkg.Complete() {
- return tpkg, nil // cache hit
- }
-
- lpkg.IllTyped = true // fail safe
-
- if lpkg.ExportFile == "" {
- // Errors while building export data will have been printed to stderr.
- return nil, fmt.Errorf("no export data file")
- }
- f, err := os.Open(lpkg.ExportFile)
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- // Read gc export data.
- //
- // We don't currently support gccgo export data because all
- // underlying workspaces use the gc toolchain. (Even build
- // systems that support gccgo don't use it for workspace
- // queries.)
- r, err := gcexportdata.NewReader(f)
- if err != nil {
- return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
- }
-
- // Build the view.
- //
- // The gcexportdata machinery has no concept of package ID.
- // It identifies packages by their PkgPath, which although not
- // globally unique is unique within the scope of one invocation
- // of the linker, type-checker, or gcexportdata.
- //
- // So, we must build a PkgPath-keyed view of the global
- // (conceptually ID-keyed) cache of packages and pass it to
- // gcexportdata. The view must contain every existing
- // package that might possibly be mentioned by the
- // current package---its transitive closure.
- //
- // In loadPackage, we unconditionally create a types.Package for
- // each dependency so that export data loading does not
- // create new ones.
- //
- // TODO(adonovan): it would be simpler and more efficient
- // if the export data machinery invoked a callback to
- // get-or-create a package instead of a map.
- //
- view := make(map[string]*types.Package) // view seen by gcexportdata
- seen := make(map[*loaderPackage]bool) // all visited packages
- var visit func(pkgs map[string]*Package)
- visit = func(pkgs map[string]*Package) {
- for _, p := range pkgs {
- lpkg := ld.pkgs[p.ID]
- if !seen[lpkg] {
- seen[lpkg] = true
- view[lpkg.PkgPath] = lpkg.Types
- visit(lpkg.Imports)
- }
- }
- }
- visit(lpkg.Imports)
-
- viewLen := len(view) + 1 // adding the self package
- // Parse the export data.
- // (May modify incomplete packages in view but not create new ones.)
- tpkg, err := gcexportdata.Read(r, ld.Fset, view, lpkg.PkgPath)
- if err != nil {
- return nil, fmt.Errorf("reading %s: %v", lpkg.ExportFile, err)
- }
- if viewLen != len(view) {
- log.Fatalf("Unexpected package creation during export data loading")
- }
-
- lpkg.Types = tpkg
- lpkg.IllTyped = false
-
- return tpkg, nil
-}
-
-// impliedLoadMode returns loadMode with its dependencies.
-func impliedLoadMode(loadMode LoadMode) LoadMode {
- if loadMode&NeedTypesInfo != 0 && loadMode&NeedImports == 0 {
- // If NeedTypesInfo, go/packages needs to do typechecking itself so it can
- // associate type info with the AST. To do so, we need the export data
- // for dependencies, which means we need to ask for the direct dependencies.
- // NeedImports is used to ask for the direct dependencies.
- loadMode |= NeedImports
- }
-
- if loadMode&NeedDeps != 0 && loadMode&NeedImports == 0 {
- // With NeedDeps we need to load at least direct dependencies.
- // NeedImports is used to ask for the direct dependencies.
- loadMode |= NeedImports
- }
-
- return loadMode
-}
-
-func usesExportData(cfg *Config) bool {
- return cfg.Mode&NeedExportsFile != 0 || cfg.Mode&NeedTypes != 0 && cfg.Mode&NeedDeps == 0
-}
diff --git a/vendor/golang.org/x/tools/go/packages/visit.go b/vendor/golang.org/x/tools/go/packages/visit.go
deleted file mode 100644
index b13cb081fcb..00000000000
--- a/vendor/golang.org/x/tools/go/packages/visit.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package packages
-
-import (
- "fmt"
- "os"
- "sort"
-)
-
-// Visit visits all the packages in the import graph whose roots are
-// pkgs, calling the optional pre function the first time each package
-// is encountered (preorder), and the optional post function after a
-// package's dependencies have been visited (postorder).
-// The boolean result of pre(pkg) determines whether
-// the imports of package pkg are visited.
-func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
- seen := make(map[*Package]bool)
- var visit func(*Package)
- visit = func(pkg *Package) {
- if !seen[pkg] {
- seen[pkg] = true
-
- if pre == nil || pre(pkg) {
- paths := make([]string, 0, len(pkg.Imports))
- for path := range pkg.Imports {
- paths = append(paths, path)
- }
- sort.Strings(paths) // Imports is a map, this makes visit stable
- for _, path := range paths {
- visit(pkg.Imports[path])
- }
- }
-
- if post != nil {
- post(pkg)
- }
- }
- }
- for _, pkg := range pkgs {
- visit(pkg)
- }
-}
-
-// PrintErrors prints to os.Stderr the accumulated errors of all
-// packages in the import graph rooted at pkgs, dependencies first.
-// PrintErrors returns the number of errors printed.
-func PrintErrors(pkgs []*Package) int {
- var n int
- Visit(pkgs, nil, func(pkg *Package) {
- for _, err := range pkg.Errors {
- fmt.Fprintln(os.Stderr, err)
- n++
- }
- })
- return n
-}
diff --git a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go b/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
deleted file mode 100644
index cffd7acbee7..00000000000
--- a/vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
+++ /dev/null
@@ -1,524 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package objectpath defines a naming scheme for types.Objects
-// (that is, named entities in Go programs) relative to their enclosing
-// package.
-//
-// Type-checker objects are canonical, so they are usually identified by
-// their address in memory (a pointer), but a pointer has meaning only
-// within one address space. By contrast, objectpath names allow the
-// identity of an object to be sent from one program to another,
-// establishing a correspondence between types.Object variables that are
-// distinct but logically equivalent.
-//
-// A single object may have multiple paths. In this example,
-// type A struct{ X int }
-// type B A
-// the field X has two paths due to its membership of both A and B.
-// The For(obj) function always returns one of these paths, arbitrarily
-// but consistently.
-package objectpath
-
-import (
- "fmt"
- "strconv"
- "strings"
-
- "go/types"
-)
-
-// A Path is an opaque name that identifies a types.Object
-// relative to its package. Conceptually, the name consists of a
-// sequence of destructuring operations applied to the package scope
-// to obtain the original object.
-// The name does not include the package itself.
-type Path string
-
-// Encoding
-//
-// An object path is a textual and (with training) human-readable encoding
-// of a sequence of destructuring operators, starting from a types.Package.
-// The sequences represent a path through the package/object/type graph.
-// We classify these operators by their type:
-//
-// PO package->object Package.Scope.Lookup
-// OT object->type Object.Type
-// TT type->type Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
-// TO type->object Type.{At,Field,Method,Obj} [AFMO]
-//
-// All valid paths start with a package and end at an object
-// and thus may be defined by the regular language:
-//
-// objectpath = PO (OT TT* TO)*
-//
-// The concrete encoding follows directly:
-// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
-// - The only OT operator is Object.Type,
-// which we encode as '.' because dot cannot appear in an identifier.
-// - The TT operators are encoded as [EKPRU].
-// - The OT operators are encoded as [AFMO];
-// three of these (At,Field,Method) require an integer operand,
-// which is encoded as a string of decimal digits.
-// These indices are stable across different representations
-// of the same package, even source and export data.
-//
-// In the example below,
-//
-// package p
-//
-// type T interface {
-// f() (a string, b struct{ X int })
-// }
-//
-// field X has the path "T.UM0.RA1.F0",
-// representing the following sequence of operations:
-//
-// p.Lookup("T") T
-// .Type().Underlying().Method(0). f
-// .Type().Results().At(1) b
-// .Type().Field(0) X
-//
-// The encoding is not maximally compact---every R or P is
-// followed by an A, for example---but this simplifies the
-// encoder and decoder.
-//
-const (
- // object->type operators
- opType = '.' // .Type() (Object)
-
- // type->type operators
- opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
- opKey = 'K' // .Key() (Map)
- opParams = 'P' // .Params() (Signature)
- opResults = 'R' // .Results() (Signature)
- opUnderlying = 'U' // .Underlying() (Named)
-
- // type->object operators
- opAt = 'A' // .At(i) (Tuple)
- opField = 'F' // .Field(i) (Struct)
- opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
- opObj = 'O' // .Obj() (Named)
-)
-
-// The For function returns the path to an object relative to its package,
-// or an error if the object is not accessible from the package's Scope.
-//
-// The For function guarantees to return a path only for the following objects:
-// - package-level types
-// - exported package-level non-types
-// - methods
-// - parameter and result variables
-// - struct fields
-// These objects are sufficient to define the API of their package.
-// The objects described by a package's export data are drawn from this set.
-//
-// For does not return a path for predeclared names, imported package
-// names, local names, and unexported package-level names (except
-// types).
-//
-// Example: given this definition,
-//
-// package p
-//
-// type T interface {
-// f() (a string, b struct{ X int })
-// }
-//
-// For(X) would return a path that denotes the following sequence of operations:
-//
-// p.Scope().Lookup("T") (TypeName T)
-// .Type().Underlying().Method(0). (method Func f)
-// .Type().Results().At(1) (field Var b)
-// .Type().Field(0) (field Var X)
-//
-// where p is the package (*types.Package) to which X belongs.
-func For(obj types.Object) (Path, error) {
- pkg := obj.Pkg()
-
- // This table lists the cases of interest.
- //
- // Object Action
- // ------ ------
- // nil reject
- // builtin reject
- // pkgname reject
- // label reject
- // var
- // package-level accept
- // func param/result accept
- // local reject
- // struct field accept
- // const
- // package-level accept
- // local reject
- // func
- // package-level accept
- // init functions reject
- // concrete method accept
- // interface method accept
- // type
- // package-level accept
- // local reject
- //
- // The only accessible package-level objects are members of pkg itself.
- //
- // The cases are handled in four steps:
- //
- // 1. reject nil and builtin
- // 2. accept package-level objects
- // 3. reject obviously invalid objects
- // 4. search the API for the path to the param/result/field/method.
-
- // 1. reference to nil or builtin?
- if pkg == nil {
- return "", fmt.Errorf("predeclared %s has no path", obj)
- }
- scope := pkg.Scope()
-
- // 2. package-level object?
- if scope.Lookup(obj.Name()) == obj {
- // Only exported objects (and non-exported types) have a path.
- // Non-exported types may be referenced by other objects.
- if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
- return "", fmt.Errorf("no path for non-exported %v", obj)
- }
- return Path(obj.Name()), nil
- }
-
- // 3. Not a package-level object.
- // Reject obviously non-viable cases.
- switch obj := obj.(type) {
- case *types.Const, // Only package-level constants have a path.
- *types.TypeName, // Only package-level types have a path.
- *types.Label, // Labels are function-local.
- *types.PkgName: // PkgNames are file-local.
- return "", fmt.Errorf("no path for %v", obj)
-
- case *types.Var:
- // Could be:
- // - a field (obj.IsField())
- // - a func parameter or result
- // - a local var.
- // Sadly there is no way to distinguish
- // a param/result from a local
- // so we must proceed to the find.
-
- case *types.Func:
- // A func, if not package-level, must be a method.
- if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
- return "", fmt.Errorf("func is not a method: %v", obj)
- }
- // TODO(adonovan): opt: if the method is concrete,
- // do a specialized version of the rest of this function so
- // that it's O(1) not O(|scope|). Basically 'find' is needed
- // only for struct fields and interface methods.
-
- default:
- panic(obj)
- }
-
- // 4. Search the API for the path to the var (field/param/result) or method.
-
- // First inspect package-level named types.
- // In the presence of path aliases, these give
- // the best paths because non-types may
- // refer to types, but not the reverse.
- empty := make([]byte, 0, 48) // initial space
- names := scope.Names()
- for _, name := range names {
- o := scope.Lookup(name)
- tname, ok := o.(*types.TypeName)
- if !ok {
- continue // handle non-types in second pass
- }
-
- path := append(empty, name...)
- path = append(path, opType)
-
- T := o.Type()
-
- if tname.IsAlias() {
- // type alias
- if r := find(obj, T, path); r != nil {
- return Path(r), nil
- }
- } else {
- // defined (named) type
- if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil {
- return Path(r), nil
- }
- }
- }
-
- // Then inspect everything else:
- // non-types, and declared methods of defined types.
- for _, name := range names {
- o := scope.Lookup(name)
- path := append(empty, name...)
- if _, ok := o.(*types.TypeName); !ok {
- if o.Exported() {
- // exported non-type (const, var, func)
- if r := find(obj, o.Type(), append(path, opType)); r != nil {
- return Path(r), nil
- }
- }
- continue
- }
-
- // Inspect declared methods of defined types.
- if T, ok := o.Type().(*types.Named); ok {
- path = append(path, opType)
- for i := 0; i < T.NumMethods(); i++ {
- m := T.Method(i)
- path2 := appendOpArg(path, opMethod, i)
- if m == obj {
- return Path(path2), nil // found declared method
- }
- if r := find(obj, m.Type(), append(path2, opType)); r != nil {
- return Path(r), nil
- }
- }
- }
- }
-
- return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
-}
-
-func appendOpArg(path []byte, op byte, arg int) []byte {
- path = append(path, op)
- path = strconv.AppendInt(path, int64(arg), 10)
- return path
-}
-
-// find finds obj within type T, returning the path to it, or nil if not found.
-func find(obj types.Object, T types.Type, path []byte) []byte {
- switch T := T.(type) {
- case *types.Basic, *types.Named:
- // Named types belonging to pkg were handled already,
- // so T must belong to another package. No path.
- return nil
- case *types.Pointer:
- return find(obj, T.Elem(), append(path, opElem))
- case *types.Slice:
- return find(obj, T.Elem(), append(path, opElem))
- case *types.Array:
- return find(obj, T.Elem(), append(path, opElem))
- case *types.Chan:
- return find(obj, T.Elem(), append(path, opElem))
- case *types.Map:
- if r := find(obj, T.Key(), append(path, opKey)); r != nil {
- return r
- }
- return find(obj, T.Elem(), append(path, opElem))
- case *types.Signature:
- if r := find(obj, T.Params(), append(path, opParams)); r != nil {
- return r
- }
- return find(obj, T.Results(), append(path, opResults))
- case *types.Struct:
- for i := 0; i < T.NumFields(); i++ {
- f := T.Field(i)
- path2 := appendOpArg(path, opField, i)
- if f == obj {
- return path2 // found field var
- }
- if r := find(obj, f.Type(), append(path2, opType)); r != nil {
- return r
- }
- }
- return nil
- case *types.Tuple:
- for i := 0; i < T.Len(); i++ {
- v := T.At(i)
- path2 := appendOpArg(path, opAt, i)
- if v == obj {
- return path2 // found param/result var
- }
- if r := find(obj, v.Type(), append(path2, opType)); r != nil {
- return r
- }
- }
- return nil
- case *types.Interface:
- for i := 0; i < T.NumMethods(); i++ {
- m := T.Method(i)
- path2 := appendOpArg(path, opMethod, i)
- if m == obj {
- return path2 // found interface method
- }
- if r := find(obj, m.Type(), append(path2, opType)); r != nil {
- return r
- }
- }
- return nil
- }
- panic(T)
-}
-
-// Object returns the object denoted by path p within the package pkg.
-func Object(pkg *types.Package, p Path) (types.Object, error) {
- if p == "" {
- return nil, fmt.Errorf("empty path")
- }
-
- pathstr := string(p)
- var pkgobj, suffix string
- if dot := strings.IndexByte(pathstr, opType); dot < 0 {
- pkgobj = pathstr
- } else {
- pkgobj = pathstr[:dot]
- suffix = pathstr[dot:] // suffix starts with "."
- }
-
- obj := pkg.Scope().Lookup(pkgobj)
- if obj == nil {
- return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
- }
-
- // abstraction of *types.{Pointer,Slice,Array,Chan,Map}
- type hasElem interface {
- Elem() types.Type
- }
- // abstraction of *types.{Interface,Named}
- type hasMethods interface {
- Method(int) *types.Func
- NumMethods() int
- }
-
- // The loop state is the pair (t, obj),
- // exactly one of which is non-nil, initially obj.
- // All suffixes start with '.' (the only object->type operation),
- // followed by optional type->type operations,
- // then a type->object operation.
- // The cycle then repeats.
- var t types.Type
- for suffix != "" {
- code := suffix[0]
- suffix = suffix[1:]
-
- // Codes [AFM] have an integer operand.
- var index int
- switch code {
- case opAt, opField, opMethod:
- rest := strings.TrimLeft(suffix, "0123456789")
- numerals := suffix[:len(suffix)-len(rest)]
- suffix = rest
- i, err := strconv.Atoi(numerals)
- if err != nil {
- return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
- }
- index = int(i)
- case opObj:
- // no operand
- default:
- // The suffix must end with a type->object operation.
- if suffix == "" {
- return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
- }
- }
-
- if code == opType {
- if t != nil {
- return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
- }
- t = obj.Type()
- obj = nil
- continue
- }
-
- if t == nil {
- return nil, fmt.Errorf("invalid path: code %q in object context", code)
- }
-
- // Inv: t != nil, obj == nil
-
- switch code {
- case opElem:
- hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
- }
- t = hasElem.Elem()
-
- case opKey:
- mapType, ok := t.(*types.Map)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
- }
- t = mapType.Key()
-
- case opParams:
- sig, ok := t.(*types.Signature)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
- }
- t = sig.Params()
-
- case opResults:
- sig, ok := t.(*types.Signature)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
- }
- t = sig.Results()
-
- case opUnderlying:
- named, ok := t.(*types.Named)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
- }
- t = named.Underlying()
-
- case opAt:
- tuple, ok := t.(*types.Tuple)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t)
- }
- if n := tuple.Len(); index >= n {
- return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
- }
- obj = tuple.At(index)
- t = nil
-
- case opField:
- structType, ok := t.(*types.Struct)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
- }
- if n := structType.NumFields(); index >= n {
- return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
- }
- obj = structType.Field(index)
- t = nil
-
- case opMethod:
- hasMethods, ok := t.(hasMethods) // Interface or Named
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t)
- }
- if n := hasMethods.NumMethods(); index >= n {
- return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n)
- }
- obj = hasMethods.Method(index)
- t = nil
-
- case opObj:
- named, ok := t.(*types.Named)
- if !ok {
- return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
- }
- obj = named.Obj()
- t = nil
-
- default:
- return nil, fmt.Errorf("invalid path: unknown code %q", code)
- }
- }
-
- if obj.Pkg() != pkg {
- return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
- }
-
- return obj, nil // success
-}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
deleted file mode 100644
index 38f596daf9e..00000000000
--- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-import (
- "go/ast"
- "go/types"
-
- "golang.org/x/tools/go/ast/astutil"
-)
-
-// Callee returns the named target of a function call, if any:
-// a function, method, builtin, or variable.
-func Callee(info *types.Info, call *ast.CallExpr) types.Object {
- var obj types.Object
- switch fun := astutil.Unparen(call.Fun).(type) {
- case *ast.Ident:
- obj = info.Uses[fun] // type, var, builtin, or declared func
- case *ast.SelectorExpr:
- if sel, ok := info.Selections[fun]; ok {
- obj = sel.Obj() // method or field
- } else {
- obj = info.Uses[fun.Sel] // qualified identifier?
- }
- }
- if _, ok := obj.(*types.TypeName); ok {
- return nil // T(x) is a conversion, not a call
- }
- return obj
-}
-
-// StaticCallee returns the target (function or method) of a static
-// function call, if any. It returns nil for calls to builtins.
-func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
- if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
- return f
- }
- return nil
-}
-
-func interfaceMethod(f *types.Func) bool {
- recv := f.Type().(*types.Signature).Recv()
- return recv != nil && types.IsInterface(recv.Type())
-}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/imports.go b/vendor/golang.org/x/tools/go/types/typeutil/imports.go
deleted file mode 100644
index 9c441dba9c0..00000000000
--- a/vendor/golang.org/x/tools/go/types/typeutil/imports.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-import "go/types"
-
-// Dependencies returns all dependencies of the specified packages.
-//
-// Dependent packages appear in topological order: if package P imports
-// package Q, Q appears earlier than P in the result.
-// The algorithm follows import statements in the order they
-// appear in the source code, so the result is a total order.
-//
-func Dependencies(pkgs ...*types.Package) []*types.Package {
- var result []*types.Package
- seen := make(map[*types.Package]bool)
- var visit func(pkgs []*types.Package)
- visit = func(pkgs []*types.Package) {
- for _, p := range pkgs {
- if !seen[p] {
- seen[p] = true
- visit(p.Imports())
- result = append(result, p)
- }
- }
- }
- visit(pkgs)
- return result
-}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go
deleted file mode 100644
index c7f75450064..00000000000
--- a/vendor/golang.org/x/tools/go/types/typeutil/map.go
+++ /dev/null
@@ -1,313 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package typeutil defines various utilities for types, such as Map,
-// a mapping from types.Type to interface{} values.
-package typeutil // import "golang.org/x/tools/go/types/typeutil"
-
-import (
- "bytes"
- "fmt"
- "go/types"
- "reflect"
-)
-
-// Map is a hash-table-based mapping from types (types.Type) to
-// arbitrary interface{} values. The concrete types that implement
-// the Type interface are pointers. Since they are not canonicalized,
-// == cannot be used to check for equivalence, and thus we cannot
-// simply use a Go map.
-//
-// Just as with map[K]V, a nil *Map is a valid empty map.
-//
-// Not thread-safe.
-//
-type Map struct {
- hasher Hasher // shared by many Maps
- table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
- length int // number of map entries
-}
-
-// entry is an entry (key/value association) in a hash bucket.
-type entry struct {
- key types.Type
- value interface{}
-}
-
-// SetHasher sets the hasher used by Map.
-//
-// All Hashers are functionally equivalent but contain internal state
-// used to cache the results of hashing previously seen types.
-//
-// A single Hasher created by MakeHasher() may be shared among many
-// Maps. This is recommended if the instances have many keys in
-// common, as it will amortize the cost of hash computation.
-//
-// A Hasher may grow without bound as new types are seen. Even when a
-// type is deleted from the map, the Hasher never shrinks, since other
-// types in the map may reference the deleted type indirectly.
-//
-// Hashers are not thread-safe, and read-only operations such as
-// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
-// read-lock) is require around all Map operations if a shared
-// hasher is accessed from multiple threads.
-//
-// If SetHasher is not called, the Map will create a private hasher at
-// the first call to Insert.
-//
-func (m *Map) SetHasher(hasher Hasher) {
- m.hasher = hasher
-}
-
-// Delete removes the entry with the given key, if any.
-// It returns true if the entry was found.
-//
-func (m *Map) Delete(key types.Type) bool {
- if m != nil && m.table != nil {
- hash := m.hasher.Hash(key)
- bucket := m.table[hash]
- for i, e := range bucket {
- if e.key != nil && types.Identical(key, e.key) {
- // We can't compact the bucket as it
- // would disturb iterators.
- bucket[i] = entry{}
- m.length--
- return true
- }
- }
- }
- return false
-}
-
-// At returns the map entry for the given key.
-// The result is nil if the entry is not present.
-//
-func (m *Map) At(key types.Type) interface{} {
- if m != nil && m.table != nil {
- for _, e := range m.table[m.hasher.Hash(key)] {
- if e.key != nil && types.Identical(key, e.key) {
- return e.value
- }
- }
- }
- return nil
-}
-
-// Set sets the map entry for key to val,
-// and returns the previous entry, if any.
-func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
- if m.table != nil {
- hash := m.hasher.Hash(key)
- bucket := m.table[hash]
- var hole *entry
- for i, e := range bucket {
- if e.key == nil {
- hole = &bucket[i]
- } else if types.Identical(key, e.key) {
- prev = e.value
- bucket[i].value = value
- return
- }
- }
-
- if hole != nil {
- *hole = entry{key, value} // overwrite deleted entry
- } else {
- m.table[hash] = append(bucket, entry{key, value})
- }
- } else {
- if m.hasher.memo == nil {
- m.hasher = MakeHasher()
- }
- hash := m.hasher.Hash(key)
- m.table = map[uint32][]entry{hash: {entry{key, value}}}
- }
-
- m.length++
- return
-}
-
-// Len returns the number of map entries.
-func (m *Map) Len() int {
- if m != nil {
- return m.length
- }
- return 0
-}
-
-// Iterate calls function f on each entry in the map in unspecified order.
-//
-// If f should mutate the map, Iterate provides the same guarantees as
-// Go maps: if f deletes a map entry that Iterate has not yet reached,
-// f will not be invoked for it, but if f inserts a map entry that
-// Iterate has not yet reached, whether or not f will be invoked for
-// it is unspecified.
-//
-func (m *Map) Iterate(f func(key types.Type, value interface{})) {
- if m != nil {
- for _, bucket := range m.table {
- for _, e := range bucket {
- if e.key != nil {
- f(e.key, e.value)
- }
- }
- }
- }
-}
-
-// Keys returns a new slice containing the set of map keys.
-// The order is unspecified.
-func (m *Map) Keys() []types.Type {
- keys := make([]types.Type, 0, m.Len())
- m.Iterate(func(key types.Type, _ interface{}) {
- keys = append(keys, key)
- })
- return keys
-}
-
-func (m *Map) toString(values bool) string {
- if m == nil {
- return "{}"
- }
- var buf bytes.Buffer
- fmt.Fprint(&buf, "{")
- sep := ""
- m.Iterate(func(key types.Type, value interface{}) {
- fmt.Fprint(&buf, sep)
- sep = ", "
- fmt.Fprint(&buf, key)
- if values {
- fmt.Fprintf(&buf, ": %q", value)
- }
- })
- fmt.Fprint(&buf, "}")
- return buf.String()
-}
-
-// String returns a string representation of the map's entries.
-// Values are printed using fmt.Sprintf("%v", v).
-// Order is unspecified.
-//
-func (m *Map) String() string {
- return m.toString(true)
-}
-
-// KeysString returns a string representation of the map's key set.
-// Order is unspecified.
-//
-func (m *Map) KeysString() string {
- return m.toString(false)
-}
-
-////////////////////////////////////////////////////////////////////////
-// Hasher
-
-// A Hasher maps each type to its hash value.
-// For efficiency, a hasher uses memoization; thus its memory
-// footprint grows monotonically over time.
-// Hashers are not thread-safe.
-// Hashers have reference semantics.
-// Call MakeHasher to create a Hasher.
-type Hasher struct {
- memo map[types.Type]uint32
-}
-
-// MakeHasher returns a new Hasher instance.
-func MakeHasher() Hasher {
- return Hasher{make(map[types.Type]uint32)}
-}
-
-// Hash computes a hash value for the given type t such that
-// Identical(t, t') => Hash(t) == Hash(t').
-func (h Hasher) Hash(t types.Type) uint32 {
- hash, ok := h.memo[t]
- if !ok {
- hash = h.hashFor(t)
- h.memo[t] = hash
- }
- return hash
-}
-
-// hashString computes the Fowler–Noll–Vo hash of s.
-func hashString(s string) uint32 {
- var h uint32
- for i := 0; i < len(s); i++ {
- h ^= uint32(s[i])
- h *= 16777619
- }
- return h
-}
-
-// hashFor computes the hash of t.
-func (h Hasher) hashFor(t types.Type) uint32 {
- // See Identical for rationale.
- switch t := t.(type) {
- case *types.Basic:
- return uint32(t.Kind())
-
- case *types.Array:
- return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
-
- case *types.Slice:
- return 9049 + 2*h.Hash(t.Elem())
-
- case *types.Struct:
- var hash uint32 = 9059
- for i, n := 0, t.NumFields(); i < n; i++ {
- f := t.Field(i)
- if f.Anonymous() {
- hash += 8861
- }
- hash += hashString(t.Tag(i))
- hash += hashString(f.Name()) // (ignore f.Pkg)
- hash += h.Hash(f.Type())
- }
- return hash
-
- case *types.Pointer:
- return 9067 + 2*h.Hash(t.Elem())
-
- case *types.Signature:
- var hash uint32 = 9091
- if t.Variadic() {
- hash *= 8863
- }
- return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
-
- case *types.Interface:
- var hash uint32 = 9103
- for i, n := 0, t.NumMethods(); i < n; i++ {
- // See go/types.identicalMethods for rationale.
- // Method order is not significant.
- // Ignore m.Pkg().
- m := t.Method(i)
- hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
- }
- return hash
-
- case *types.Map:
- return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
-
- case *types.Chan:
- return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
-
- case *types.Named:
- // Not safe with a copying GC; objects may move.
- return uint32(reflect.ValueOf(t.Obj()).Pointer())
-
- case *types.Tuple:
- return h.hashTuple(t)
- }
- panic(t)
-}
-
-func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
- // See go/types.identicalTypes for rationale.
- n := tuple.Len()
- var hash uint32 = 9137 + 2*uint32(n)
- for i := 0; i < n; i++ {
- hash += 3 * h.Hash(tuple.At(i).Type())
- }
- return hash
-}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go b/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
deleted file mode 100644
index 32084610f49..00000000000
--- a/vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements a cache of method sets.
-
-package typeutil
-
-import (
- "go/types"
- "sync"
-)
-
-// A MethodSetCache records the method set of each type T for which
-// MethodSet(T) is called so that repeat queries are fast.
-// The zero value is a ready-to-use cache instance.
-type MethodSetCache struct {
- mu sync.Mutex
- named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
- others map[types.Type]*types.MethodSet // all other types
-}
-
-// MethodSet returns the method set of type T. It is thread-safe.
-//
-// If cache is nil, this function is equivalent to types.NewMethodSet(T).
-// Utility functions can thus expose an optional *MethodSetCache
-// parameter to clients that care about performance.
-//
-func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
- if cache == nil {
- return types.NewMethodSet(T)
- }
- cache.mu.Lock()
- defer cache.mu.Unlock()
-
- switch T := T.(type) {
- case *types.Named:
- return cache.lookupNamed(T).value
-
- case *types.Pointer:
- if N, ok := T.Elem().(*types.Named); ok {
- return cache.lookupNamed(N).pointer
- }
- }
-
- // all other types
- // (The map uses pointer equivalence, not type identity.)
- mset := cache.others[T]
- if mset == nil {
- mset = types.NewMethodSet(T)
- if cache.others == nil {
- cache.others = make(map[types.Type]*types.MethodSet)
- }
- cache.others[T] = mset
- }
- return mset
-}
-
-func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
- if cache.named == nil {
- cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
- }
- // Avoid recomputing mset(*T) for each distinct Pointer
- // instance whose underlying type is a named type.
- msets, ok := cache.named[named]
- if !ok {
- msets.value = types.NewMethodSet(named)
- msets.pointer = types.NewMethodSet(types.NewPointer(named))
- cache.named[named] = msets
- }
- return msets
-}
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/ui.go b/vendor/golang.org/x/tools/go/types/typeutil/ui.go
deleted file mode 100644
index 9849c24cef3..00000000000
--- a/vendor/golang.org/x/tools/go/types/typeutil/ui.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-// This file defines utilities for user interfaces that display types.
-
-import "go/types"
-
-// IntuitiveMethodSet returns the intuitive method set of a type T,
-// which is the set of methods you can call on an addressable value of
-// that type.
-//
-// The result always contains MethodSet(T), and is exactly MethodSet(T)
-// for interface types and for pointer-to-concrete types.
-// For all other concrete types T, the result additionally
-// contains each method belonging to *T if there is no identically
-// named method on T itself.
-//
-// This corresponds to user intuition about method sets;
-// this function is intended only for user interfaces.
-//
-// The order of the result is as for types.MethodSet(T).
-//
-func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
- isPointerToConcrete := func(T types.Type) bool {
- ptr, ok := T.(*types.Pointer)
- return ok && !types.IsInterface(ptr.Elem())
- }
-
- var result []*types.Selection
- mset := msets.MethodSet(T)
- if types.IsInterface(T) || isPointerToConcrete(T) {
- for i, n := 0, mset.Len(); i < n; i++ {
- result = append(result, mset.At(i))
- }
- } else {
- // T is some other concrete type.
- // Report methods of T and *T, preferring those of T.
- pmset := msets.MethodSet(types.NewPointer(T))
- for i, n := 0, pmset.Len(); i < n; i++ {
- meth := pmset.At(i)
- if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
- meth = m
- }
- result = append(result, meth)
- }
-
- }
- return result
-}
diff --git a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go b/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
deleted file mode 100644
index 9f4c68a1851..00000000000
--- a/vendor/golang.org/x/tools/internal/analysisinternal/analysis.go
+++ /dev/null
@@ -1,302 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package analysisinternal exposes internal-only fields from go/analysis.
-package analysisinternal
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/go/ast/astutil"
-)
-
-var (
- GetTypeErrors func(p interface{}) []types.Error
- SetTypeErrors func(p interface{}, errors []types.Error)
-)
-
-func TypeErrorEndPos(fset *token.FileSet, src []byte, start token.Pos) token.Pos {
- // Get the end position for the type error.
- offset, end := fset.PositionFor(start, false).Offset, start
- if offset >= len(src) {
- return end
- }
- if width := bytes.IndexAny(src[offset:], " \n,():;[]+-*"); width > 0 {
- end = start + token.Pos(width)
- }
- return end
-}
-
-func ZeroValue(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
- under := typ
- if n, ok := typ.(*types.Named); ok {
- under = n.Underlying()
- }
- switch u := under.(type) {
- case *types.Basic:
- switch {
- case u.Info()&types.IsNumeric != 0:
- return &ast.BasicLit{Kind: token.INT, Value: "0"}
- case u.Info()&types.IsBoolean != 0:
- return &ast.Ident{Name: "false"}
- case u.Info()&types.IsString != 0:
- return &ast.BasicLit{Kind: token.STRING, Value: `""`}
- default:
- panic("unknown basic type")
- }
- case *types.Chan, *types.Interface, *types.Map, *types.Pointer, *types.Signature, *types.Slice:
- return ast.NewIdent("nil")
- case *types.Struct:
- texpr := TypeExpr(fset, f, pkg, typ) // typ because we want the name here.
- if texpr == nil {
- return nil
- }
- return &ast.CompositeLit{
- Type: texpr,
- }
- case *types.Array:
- texpr := TypeExpr(fset, f, pkg, u.Elem())
- if texpr == nil {
- return nil
- }
- return &ast.CompositeLit{
- Type: &ast.ArrayType{
- Elt: texpr,
- Len: &ast.BasicLit{Kind: token.INT, Value: fmt.Sprintf("%v", u.Len())},
- },
- }
- }
- return nil
-}
-
-func TypeExpr(fset *token.FileSet, f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
- switch t := typ.(type) {
- case *types.Basic:
- switch t.Kind() {
- case types.UnsafePointer:
- return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
- default:
- return ast.NewIdent(t.Name())
- }
- case *types.Pointer:
- x := TypeExpr(fset, f, pkg, t.Elem())
- if x == nil {
- return nil
- }
- return &ast.UnaryExpr{
- Op: token.MUL,
- X: x,
- }
- case *types.Array:
- elt := TypeExpr(fset, f, pkg, t.Elem())
- if elt == nil {
- return nil
- }
- return &ast.ArrayType{
- Len: &ast.BasicLit{
- Kind: token.INT,
- Value: fmt.Sprintf("%d", t.Len()),
- },
- Elt: elt,
- }
- case *types.Slice:
- elt := TypeExpr(fset, f, pkg, t.Elem())
- if elt == nil {
- return nil
- }
- return &ast.ArrayType{
- Elt: elt,
- }
- case *types.Map:
- key := TypeExpr(fset, f, pkg, t.Key())
- value := TypeExpr(fset, f, pkg, t.Elem())
- if key == nil || value == nil {
- return nil
- }
- return &ast.MapType{
- Key: key,
- Value: value,
- }
- case *types.Chan:
- dir := ast.ChanDir(t.Dir())
- if t.Dir() == types.SendRecv {
- dir = ast.SEND | ast.RECV
- }
- value := TypeExpr(fset, f, pkg, t.Elem())
- if value == nil {
- return nil
- }
- return &ast.ChanType{
- Dir: dir,
- Value: value,
- }
- case *types.Signature:
- var params []*ast.Field
- for i := 0; i < t.Params().Len(); i++ {
- p := TypeExpr(fset, f, pkg, t.Params().At(i).Type())
- if p == nil {
- return nil
- }
- params = append(params, &ast.Field{
- Type: p,
- Names: []*ast.Ident{
- {
- Name: t.Params().At(i).Name(),
- },
- },
- })
- }
- var returns []*ast.Field
- for i := 0; i < t.Results().Len(); i++ {
- r := TypeExpr(fset, f, pkg, t.Results().At(i).Type())
- if r == nil {
- return nil
- }
- returns = append(returns, &ast.Field{
- Type: r,
- })
- }
- return &ast.FuncType{
- Params: &ast.FieldList{
- List: params,
- },
- Results: &ast.FieldList{
- List: returns,
- },
- }
- case *types.Named:
- if t.Obj().Pkg() == nil {
- return ast.NewIdent(t.Obj().Name())
- }
- if t.Obj().Pkg() == pkg {
- return ast.NewIdent(t.Obj().Name())
- }
- pkgName := t.Obj().Pkg().Name()
- // If the file already imports the package under another name, use that.
- for _, group := range astutil.Imports(fset, f) {
- for _, cand := range group {
- if strings.Trim(cand.Path.Value, `"`) == t.Obj().Pkg().Path() {
- if cand.Name != nil && cand.Name.Name != "" {
- pkgName = cand.Name.Name
- }
- }
- }
- }
- if pkgName == "." {
- return ast.NewIdent(t.Obj().Name())
- }
- return &ast.SelectorExpr{
- X: ast.NewIdent(pkgName),
- Sel: ast.NewIdent(t.Obj().Name()),
- }
- default:
- return nil // TODO: anonymous structs, but who does that
- }
-}
-
-type TypeErrorPass string
-
-const (
- NoNewVars TypeErrorPass = "nonewvars"
- NoResultValues TypeErrorPass = "noresultvalues"
- UndeclaredName TypeErrorPass = "undeclaredname"
-)
-
-// StmtToInsertVarBefore returns the ast.Stmt before which we can safely insert a new variable.
-// Some examples:
-//
-// Basic Example:
-// z := 1
-// y := z + x
-// If x is undeclared, then this function would return `y := z + x`, so that we
-// can insert `x := ` on the line before `y := z + x`.
-//
-// If stmt example:
-// if z == 1 {
-// } else if z == y {}
-// If y is undeclared, then this function would return `if z == 1 {`, because we cannot
-// insert a statement between an if and an else if statement. As a result, we need to find
-// the top of the if chain to insert `y := ` before.
-func StmtToInsertVarBefore(path []ast.Node) ast.Stmt {
- enclosingIndex := -1
- for i, p := range path {
- if _, ok := p.(ast.Stmt); ok {
- enclosingIndex = i
- break
- }
- }
- if enclosingIndex == -1 {
- return nil
- }
- enclosingStmt := path[enclosingIndex]
- switch enclosingStmt.(type) {
- case *ast.IfStmt:
- // The enclosingStmt is inside of the if declaration,
- // We need to check if we are in an else-if stmt and
- // get the base if statement.
- return baseIfStmt(path, enclosingIndex)
- case *ast.CaseClause:
- // Get the enclosing switch stmt if the enclosingStmt is
- // inside of the case statement.
- for i := enclosingIndex + 1; i < len(path); i++ {
- if node, ok := path[i].(*ast.SwitchStmt); ok {
- return node
- } else if node, ok := path[i].(*ast.TypeSwitchStmt); ok {
- return node
- }
- }
- }
- if len(path) <= enclosingIndex+1 {
- return enclosingStmt.(ast.Stmt)
- }
- // Check if the enclosing statement is inside another node.
- switch expr := path[enclosingIndex+1].(type) {
- case *ast.IfStmt:
- // Get the base if statement.
- return baseIfStmt(path, enclosingIndex+1)
- case *ast.ForStmt:
- if expr.Init == enclosingStmt || expr.Post == enclosingStmt {
- return expr
- }
- }
- return enclosingStmt.(ast.Stmt)
-}
-
-// baseIfStmt walks up the if/else-if chain until we get to
-// the top of the current if chain.
-func baseIfStmt(path []ast.Node, index int) ast.Stmt {
- stmt := path[index]
- for i := index + 1; i < len(path); i++ {
- if node, ok := path[i].(*ast.IfStmt); ok && node.Else == stmt {
- stmt = node
- continue
- }
- break
- }
- return stmt.(ast.Stmt)
-}
-
-// WalkASTWithParent walks the AST rooted at n. The semantics are
-// similar to ast.Inspect except it does not call f(nil).
-func WalkASTWithParent(n ast.Node, f func(n ast.Node, parent ast.Node) bool) {
- var ancestors []ast.Node
- ast.Inspect(n, func(n ast.Node) (recurse bool) {
- if n == nil {
- ancestors = ancestors[:len(ancestors)-1]
- return false
- }
-
- var parent ast.Node
- if len(ancestors) > 0 {
- parent = ancestors[len(ancestors)-1]
- }
- ancestors = append(ancestors, n)
- return f(n, parent)
- })
-}
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
deleted file mode 100644
index 2c4527f2436..00000000000
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Package packagesinternal exposes internal-only fields from go/packages.
-package packagesinternal
-
-import (
- "golang.org/x/tools/internal/gocommand"
-)
-
-var GetForTest = func(p interface{}) string { return "" }
-
-var GetGoCmdRunner = func(config interface{}) *gocommand.Runner { return nil }
-
-var SetGoCmdRunner = func(config interface{}, runner *gocommand.Runner) {}
-
-var TypecheckCgo int
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
deleted file mode 100644
index a5bb408e2f1..00000000000
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2020 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typesinternal
-
-import (
- "go/types"
- "reflect"
- "unsafe"
-)
-
-func SetUsesCgo(conf *types.Config) bool {
- v := reflect.ValueOf(conf).Elem()
-
- f := v.FieldByName("go115UsesCgo")
- if !f.IsValid() {
- f = v.FieldByName("UsesCgo")
- if !f.IsValid() {
- return false
- }
- }
-
- addr := unsafe.Pointer(f.UnsafeAddr())
- *(*bool)(addr) = true
-
- return true
-}
diff --git a/vendor/honnef.co/go/tools/LICENSE b/vendor/honnef.co/go/tools/LICENSE
deleted file mode 100644
index dfd03145460..00000000000
--- a/vendor/honnef.co/go/tools/LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2016 Dominik Honnef
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY b/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY
deleted file mode 100644
index 623d85e85b7..00000000000
--- a/vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY
+++ /dev/null
@@ -1,284 +0,0 @@
-Staticcheck and its related tools make use of third party projects,
-either by reusing their code, or by statically linking them into
-resulting binaries. These projects are:
-
-* The Go Programming Language - https://golang.org/
-
- Copyright (c) 2009 The Go Authors. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-* github.com/BurntSushi/toml - https://github.com/BurntSushi/toml
-
- The MIT License (MIT)
-
- Copyright (c) 2013 TOML authors
-
- Permission is hereby granted, free of charge, to any person obtaining a copy
- of this software and associated documentation files (the "Software"), to deal
- in the Software without restriction, including without limitation the rights
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- copies of the Software, and to permit persons to whom the Software is
- furnished to do so, subject to the following conditions:
-
- The above copyright notice and this permission notice shall be included in
- all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- THE SOFTWARE.
-
-
-* github.com/google/renameio - https://github.com/google/renameio
-
- Copyright 2018 Google Inc.
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
-
-* github.com/kisielk/gotool - https://github.com/kisielk/gotool
-
- Copyright (c) 2013 Kamil Kisiel
-
- Permission is hereby granted, free of charge, to any person obtaining
- a copy of this software and associated documentation files (the
- "Software"), to deal in the Software without restriction, including
- without limitation the rights to use, copy, modify, merge, publish,
- distribute, sublicense, and/or sell copies of the Software, and to
- permit persons to whom the Software is furnished to do so, subject to
- the following conditions:
-
- The above copyright notice and this permission notice shall be
- included in all copies or substantial portions of the Software.
-
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
- LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
- OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
- WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
- All the files in this distribution are covered under either the MIT
- license (see the file LICENSE) except some files mentioned below.
-
- match.go, match_test.go:
-
- Copyright (c) 2009 The Go Authors. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-* github.com/rogpeppe/go-internal - https://github.com/rogpeppe/go-internal
-
- Copyright (c) 2018 The Go Authors. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-* golang.org/x/mod/module - https://github.com/golang/mod
-
- Copyright (c) 2009 The Go Authors. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-* golang.org/x/tools/go/analysis - https://github.com/golang/tools
-
- Copyright (c) 2009 The Go Authors. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-* gogrep - https://github.com/mvdan/gogrep
-
- Copyright (c) 2017, Daniel Martí. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * Neither the name of the copyright holder nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-* gosmith - https://github.com/dvyukov/gosmith
-
- Copyright (c) 2014 Dmitry Vyukov. All rights reserved.
-
- Redistribution and use in source and binary forms, with or without
- modification, are permitted provided that the following conditions are
- met:
-
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following disclaimer
- in the documentation and/or other materials provided with the
- distribution.
- * The name of Dmitry Vyukov may be used to endorse or promote
- products derived from this software without specific prior written permission.
-
- THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/honnef.co/go/tools/arg/arg.go b/vendor/honnef.co/go/tools/arg/arg.go
deleted file mode 100644
index 1e7f30db42d..00000000000
--- a/vendor/honnef.co/go/tools/arg/arg.go
+++ /dev/null
@@ -1,48 +0,0 @@
-package arg
-
-var args = map[string]int{
- "(*encoding/json.Decoder).Decode.v": 0,
- "(*encoding/json.Encoder).Encode.v": 0,
- "(*encoding/xml.Decoder).Decode.v": 0,
- "(*encoding/xml.Encoder).Encode.v": 0,
- "(*sync.Pool).Put.x": 0,
- "(*text/template.Template).Parse.text": 0,
- "(io.Seeker).Seek.offset": 0,
- "(time.Time).Sub.u": 0,
- "append.elems": 1,
- "append.slice": 0,
- "bytes.Equal.a": 0,
- "bytes.Equal.b": 1,
- "encoding/binary.Write.data": 2,
- "errors.New.text": 0,
- "fmt.Fprintf.format": 1,
- "fmt.Printf.format": 0,
- "fmt.Sprintf.a[0]": 1,
- "fmt.Sprintf.format": 0,
- "json.Marshal.v": 0,
- "json.Unmarshal.v": 1,
- "len.v": 0,
- "make.size[0]": 1,
- "make.size[1]": 2,
- "make.t": 0,
- "net/url.Parse.rawurl": 0,
- "os.OpenFile.flag": 1,
- "os/exec.Command.name": 0,
- "os/signal.Notify.c": 0,
- "regexp.Compile.expr": 0,
- "runtime.SetFinalizer.finalizer": 1,
- "runtime.SetFinalizer.obj": 0,
- "sort.Sort.data": 0,
- "time.Parse.layout": 0,
- "time.Sleep.d": 0,
- "xml.Marshal.v": 0,
- "xml.Unmarshal.v": 1,
-}
-
-func Arg(name string) int {
- n, ok := args[name]
- if !ok {
- panic("unknown argument " + name)
- }
- return n
-}
diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/README.md b/vendor/honnef.co/go/tools/cmd/staticcheck/README.md
deleted file mode 100644
index 4d14577fdf7..00000000000
--- a/vendor/honnef.co/go/tools/cmd/staticcheck/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# staticcheck
-
-_staticcheck_ offers extensive analysis of Go code, covering a myriad
-of categories. It will detect bugs, suggest code simplifications,
-point out dead code, and more.
-
-## Installation
-
-See [the main README](https://github.com/dominikh/go-tools#installation) for installation instructions.
-
-## Documentation
-
-Detailed documentation can be found on
-[staticcheck.io](https://staticcheck.io/docs/).
-
diff --git a/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go b/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go
deleted file mode 100644
index 4f504dc39db..00000000000
--- a/vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// staticcheck analyses Go code and makes it better.
-package main // import "honnef.co/go/tools/cmd/staticcheck"
-
-import (
- "log"
- "os"
-
- "golang.org/x/tools/go/analysis"
- "honnef.co/go/tools/lint"
- "honnef.co/go/tools/lint/lintutil"
- "honnef.co/go/tools/simple"
- "honnef.co/go/tools/staticcheck"
- "honnef.co/go/tools/stylecheck"
- "honnef.co/go/tools/unused"
-)
-
-func main() {
- fs := lintutil.FlagSet("staticcheck")
- wholeProgram := fs.Bool("unused.whole-program", false, "Run unused in whole program mode")
- debug := fs.String("debug.unused-graph", "", "Write unused's object graph to `file`")
- fs.Parse(os.Args[1:])
-
- var cs []*analysis.Analyzer
- for _, v := range simple.Analyzers {
- cs = append(cs, v)
- }
- for _, v := range staticcheck.Analyzers {
- cs = append(cs, v)
- }
- for _, v := range stylecheck.Analyzers {
- cs = append(cs, v)
- }
-
- u := unused.NewChecker(*wholeProgram)
- if *debug != "" {
- f, err := os.OpenFile(*debug, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666)
- if err != nil {
- log.Fatal(err)
- }
- u.Debug = f
- }
- cums := []lint.CumulativeChecker{u}
- lintutil.ProcessFlagSet(cs, cums, fs)
-}
diff --git a/vendor/honnef.co/go/tools/code/code.go b/vendor/honnef.co/go/tools/code/code.go
deleted file mode 100644
index 6f4df8b9aa6..00000000000
--- a/vendor/honnef.co/go/tools/code/code.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Package code answers structural and type questions about Go code.
-package code
-
-import (
- "flag"
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/go/ast/inspector"
- "honnef.co/go/tools/facts"
- "honnef.co/go/tools/go/types/typeutil"
- "honnef.co/go/tools/ir"
- "honnef.co/go/tools/lint"
-)
-
-type Positioner interface {
- Pos() token.Pos
-}
-
-func CallName(call *ir.CallCommon) string {
- if call.IsInvoke() {
- return ""
- }
- switch v := call.Value.(type) {
- case *ir.Function:
- fn, ok := v.Object().(*types.Func)
- if !ok {
- return ""
- }
- return lint.FuncName(fn)
- case *ir.Builtin:
- return v.Name()
- }
- return ""
-}
-
-func IsCallTo(call *ir.CallCommon, name string) bool { return CallName(call) == name }
-
-func IsCallToAny(call *ir.CallCommon, names ...string) bool {
- q := CallName(call)
- for _, name := range names {
- if q == name {
- return true
- }
- }
- return false
-}
-
-func IsType(T types.Type, name string) bool { return types.TypeString(T, nil) == name }
-
-func FilterDebug(instr []ir.Instruction) []ir.Instruction {
- var out []ir.Instruction
- for _, ins := range instr {
- if _, ok := ins.(*ir.DebugRef); !ok {
- out = append(out, ins)
- }
- }
- return out
-}
-
-func IsExample(fn *ir.Function) bool {
- if !strings.HasPrefix(fn.Name(), "Example") {
- return false
- }
- f := fn.Prog.Fset.File(fn.Pos())
- if f == nil {
- return false
- }
- return strings.HasSuffix(f.Name(), "_test.go")
-}
-
-func IsPointerLike(T types.Type) bool {
- switch T := T.Underlying().(type) {
- case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer:
- return true
- case *types.Basic:
- return T.Kind() == types.UnsafePointer
- }
- return false
-}
-
-func IsIdent(expr ast.Expr, ident string) bool {
- id, ok := expr.(*ast.Ident)
- return ok && id.Name == ident
-}
-
-// isBlank returns whether id is the blank identifier "_".
-// If id == nil, the answer is false.
-func IsBlank(id ast.Expr) bool {
- ident, _ := id.(*ast.Ident)
- return ident != nil && ident.Name == "_"
-}
-
-func IsIntLiteral(expr ast.Expr, literal string) bool {
- lit, ok := expr.(*ast.BasicLit)
- return ok && lit.Kind == token.INT && lit.Value == literal
-}
-
-// Deprecated: use IsIntLiteral instead
-func IsZero(expr ast.Expr) bool {
- return IsIntLiteral(expr, "0")
-}
-
-func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool {
- return IsType(pass.TypesInfo.TypeOf(expr), name)
-}
-
-func IsInTest(pass *analysis.Pass, node Positioner) bool {
- // FIXME(dh): this doesn't work for global variables with
- // initializers
- f := pass.Fset.File(node.Pos())
- return f != nil && strings.HasSuffix(f.Name(), "_test.go")
-}
-
-// IsMain reports whether the package being processed is a package
-// main.
-func IsMain(pass *analysis.Pass) bool {
- return pass.Pkg.Name() == "main"
-}
-
-// IsMainLike reports whether the package being processed is a
-// main-like package. A main-like package is a package that is
-// package main, or that is intended to be used by a tool framework
-// such as cobra to implement a command.
-//
-// Note that this function errs on the side of false positives; it may
-// return true for packages that aren't main-like. IsMainLike is
-// intended for analyses that wish to suppress diagnostics for
-// main-like packages to avoid false positives.
-func IsMainLike(pass *analysis.Pass) bool {
- if pass.Pkg.Name() == "main" {
- return true
- }
- for _, imp := range pass.Pkg.Imports() {
- if imp.Path() == "github.com/spf13/cobra" {
- return true
- }
- }
- return false
-}
-
-func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string {
- info := pass.TypesInfo
- sel := info.Selections[expr]
- if sel == nil {
- if x, ok := expr.X.(*ast.Ident); ok {
- pkg, ok := info.ObjectOf(x).(*types.PkgName)
- if !ok {
- // This shouldn't happen
- return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name)
- }
- return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name)
- }
- panic(fmt.Sprintf("unsupported selector: %v", expr))
- }
- return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name())
-}
-
-func IsNil(pass *analysis.Pass, expr ast.Expr) bool {
- return pass.TypesInfo.Types[expr].IsNil()
-}
-
-func BoolConst(pass *analysis.Pass, expr ast.Expr) bool {
- val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val()
- return constant.BoolVal(val)
-}
-
-func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool {
- // We explicitly don't support typed bools because more often than
- // not, custom bool types are used as binary enums and the
- // explicit comparison is desired.
-
- ident, ok := expr.(*ast.Ident)
- if !ok {
- return false
- }
- obj := pass.TypesInfo.ObjectOf(ident)
- c, ok := obj.(*types.Const)
- if !ok {
- return false
- }
- basic, ok := c.Type().(*types.Basic)
- if !ok {
- return false
- }
- if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool {
- return false
- }
- return true
-}
-
-func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) {
- tv := pass.TypesInfo.Types[expr]
- if tv.Value == nil {
- return 0, false
- }
- if tv.Value.Kind() != constant.Int {
- return 0, false
- }
- return constant.Int64Val(tv.Value)
-}
-
-func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) {
- val := pass.TypesInfo.Types[expr].Value
- if val == nil {
- return "", false
- }
- if val.Kind() != constant.String {
- return "", false
- }
- return constant.StringVal(val), true
-}
-
-// Dereference returns a pointer's element type; otherwise it returns
-// T.
-func Dereference(T types.Type) types.Type {
- if p, ok := T.Underlying().(*types.Pointer); ok {
- return p.Elem()
- }
- return T
-}
-
-// DereferenceR returns a pointer's element type; otherwise it returns
-// T. If the element type is itself a pointer, DereferenceR will be
-// applied recursively.
-func DereferenceR(T types.Type) types.Type {
- if p, ok := T.Underlying().(*types.Pointer); ok {
- return DereferenceR(p.Elem())
- }
- return T
-}
-
-func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string {
- switch fun := astutil.Unparen(call.Fun).(type) {
- case *ast.SelectorExpr:
- fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func)
- if !ok {
- return ""
- }
- return lint.FuncName(fn)
- case *ast.Ident:
- obj := pass.TypesInfo.ObjectOf(fun)
- switch obj := obj.(type) {
- case *types.Func:
- return lint.FuncName(obj)
- case *types.Builtin:
- return obj.Name()
- default:
- return ""
- }
- default:
- return ""
- }
-}
-
-func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool {
- call, ok := node.(*ast.CallExpr)
- if !ok {
- return false
- }
- return CallNameAST(pass, call) == name
-}
-
-func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool {
- call, ok := node.(*ast.CallExpr)
- if !ok {
- return false
- }
- q := CallNameAST(pass, call)
- for _, name := range names {
- if q == name {
- return true
- }
- }
- return false
-}
-
-func Preamble(f *ast.File) string {
- cutoff := f.Package
- if f.Doc != nil {
- cutoff = f.Doc.Pos()
- }
- var out []string
- for _, cmt := range f.Comments {
- if cmt.Pos() >= cutoff {
- break
- }
- out = append(out, cmt.Text())
- }
- return strings.Join(out, "\n")
-}
-
-func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec {
- if len(specs) == 0 {
- return nil
- }
- groups := make([][]ast.Spec, 1)
- groups[0] = append(groups[0], specs[0])
-
- for _, spec := range specs[1:] {
- g := groups[len(groups)-1]
- if fset.PositionFor(spec.Pos(), false).Line-1 !=
- fset.PositionFor(g[len(g)-1].End(), false).Line {
-
- groups = append(groups, nil)
- }
-
- groups[len(groups)-1] = append(groups[len(groups)-1], spec)
- }
-
- return groups
-}
-
-func IsObject(obj types.Object, name string) bool {
- var path string
- if pkg := obj.Pkg(); pkg != nil {
- path = pkg.Path() + "."
- }
- return path+obj.Name() == name
-}
-
-type Field struct {
- Var *types.Var
- Tag string
- Path []int
-}
-
-// FlattenFields recursively flattens T and embedded structs,
-// returning a list of fields. If multiple fields with the same name
-// exist, all will be returned.
-func FlattenFields(T *types.Struct) []Field {
- return flattenFields(T, nil, nil)
-}
-
-func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field {
- if seen == nil {
- seen = map[types.Type]bool{}
- }
- if seen[T] {
- return nil
- }
- seen[T] = true
- var out []Field
- for i := 0; i < T.NumFields(); i++ {
- field := T.Field(i)
- tag := T.Tag(i)
- np := append(path[:len(path):len(path)], i)
- if field.Anonymous() {
- if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok {
- out = append(out, flattenFields(s, np, seen)...)
- }
- } else {
- out = append(out, Field{field, tag, np})
- }
- }
- return out
-}
-
-func File(pass *analysis.Pass, node Positioner) *ast.File {
- m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File)
- return m[pass.Fset.File(node.Pos())]
-}
-
-// IsGenerated reports whether pos is in a generated file, It ignores
-// //line directives.
-func IsGenerated(pass *analysis.Pass, pos token.Pos) bool {
- _, ok := Generator(pass, pos)
- return ok
-}
-
-// Generator returns the generator that generated the file containing
-// pos. It ignores //line directives.
-func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) {
- file := pass.Fset.PositionFor(pos, false).Filename
- m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
- g, ok := m[file]
- return g, ok
-}
-
-// MayHaveSideEffects reports whether expr may have side effects. If
-// the purity argument is nil, this function implements a purely
-// syntactic check, meaning that any function call may have side
-// effects, regardless of the called function's body. Otherwise,
-// purity will be consulted to determine the purity of function calls.
-func MayHaveSideEffects(pass *analysis.Pass, expr ast.Expr, purity facts.PurityResult) bool {
- switch expr := expr.(type) {
- case *ast.BadExpr:
- return true
- case *ast.Ellipsis:
- return MayHaveSideEffects(pass, expr.Elt, purity)
- case *ast.FuncLit:
- // the literal itself cannot have side ffects, only calling it
- // might, which is handled by CallExpr.
- return false
- case *ast.ArrayType, *ast.StructType, *ast.FuncType, *ast.InterfaceType, *ast.MapType, *ast.ChanType:
- // types cannot have side effects
- return false
- case *ast.BasicLit:
- return false
- case *ast.BinaryExpr:
- return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Y, purity)
- case *ast.CallExpr:
- if purity == nil {
- return true
- }
- switch obj := typeutil.Callee(pass.TypesInfo, expr).(type) {
- case *types.Func:
- if _, ok := purity[obj]; !ok {
- return true
- }
- case *types.Builtin:
- switch obj.Name() {
- case "len", "cap":
- default:
- return true
- }
- default:
- return true
- }
- for _, arg := range expr.Args {
- if MayHaveSideEffects(pass, arg, purity) {
- return true
- }
- }
- return false
- case *ast.CompositeLit:
- if MayHaveSideEffects(pass, expr.Type, purity) {
- return true
- }
- for _, elt := range expr.Elts {
- if MayHaveSideEffects(pass, elt, purity) {
- return true
- }
- }
- return false
- case *ast.Ident:
- return false
- case *ast.IndexExpr:
- return MayHaveSideEffects(pass, expr.X, purity) || MayHaveSideEffects(pass, expr.Index, purity)
- case *ast.KeyValueExpr:
- return MayHaveSideEffects(pass, expr.Key, purity) || MayHaveSideEffects(pass, expr.Value, purity)
- case *ast.SelectorExpr:
- return MayHaveSideEffects(pass, expr.X, purity)
- case *ast.SliceExpr:
- return MayHaveSideEffects(pass, expr.X, purity) ||
- MayHaveSideEffects(pass, expr.Low, purity) ||
- MayHaveSideEffects(pass, expr.High, purity) ||
- MayHaveSideEffects(pass, expr.Max, purity)
- case *ast.StarExpr:
- return MayHaveSideEffects(pass, expr.X, purity)
- case *ast.TypeAssertExpr:
- return MayHaveSideEffects(pass, expr.X, purity)
- case *ast.UnaryExpr:
- if MayHaveSideEffects(pass, expr.X, purity) {
- return true
- }
- return expr.Op == token.ARROW
- case *ast.ParenExpr:
- return MayHaveSideEffects(pass, expr.X, purity)
- case nil:
- return false
- default:
- panic(fmt.Sprintf("internal error: unhandled type %T", expr))
- }
-}
-
-func IsGoVersion(pass *analysis.Pass, minor int) bool {
- version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int)
- return version >= minor
-}
-
-func Preorder(pass *analysis.Pass, fn func(ast.Node), types ...ast.Node) {
- pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Preorder(types, fn)
-}
diff --git a/vendor/honnef.co/go/tools/config/config.go b/vendor/honnef.co/go/tools/config/config.go
deleted file mode 100644
index 55115371b9f..00000000000
--- a/vendor/honnef.co/go/tools/config/config.go
+++ /dev/null
@@ -1,245 +0,0 @@
-package config
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/token"
- "os"
- "path/filepath"
- "reflect"
- "strings"
-
- "github.com/BurntSushi/toml"
- "golang.org/x/tools/go/analysis"
-)
-
-// Dir looks at a list of absolute file names, which should make up a
-// single package, and returns the path of the directory that may
-// contain a staticcheck.conf file. It returns the empty string if no
-// such directory could be determined, for example because all files
-// were located in Go's build cache.
-func Dir(files []string) string {
- if len(files) == 0 {
- return ""
- }
- cache, err := os.UserCacheDir()
- if err != nil {
- cache = ""
- }
- var path string
- for _, p := range files {
- // FIXME(dh): using strings.HasPrefix isn't technically
- // correct, but it should be good enough for now.
- if cache != "" && strings.HasPrefix(p, cache) {
- // File in the build cache of the standard Go build system
- continue
- }
- path = p
- break
- }
-
- if path == "" {
- // The package only consists of generated files.
- return ""
- }
-
- dir := filepath.Dir(path)
- return dir
-}
-
-func dirAST(files []*ast.File, fset *token.FileSet) string {
- names := make([]string, len(files))
- for i, f := range files {
- names[i] = fset.PositionFor(f.Pos(), true).Filename
- }
- return Dir(names)
-}
-
-var Analyzer = &analysis.Analyzer{
- Name: "config",
- Doc: "loads configuration for the current package tree",
- Run: func(pass *analysis.Pass) (interface{}, error) {
- dir := dirAST(pass.Files, pass.Fset)
- if dir == "" {
- cfg := DefaultConfig
- return &cfg, nil
- }
- cfg, err := Load(dir)
- if err != nil {
- return nil, fmt.Errorf("error loading staticcheck.conf: %s", err)
- }
- return &cfg, nil
- },
- RunDespiteErrors: true,
- ResultType: reflect.TypeOf((*Config)(nil)),
-}
-
-func For(pass *analysis.Pass) *Config {
- return pass.ResultOf[Analyzer].(*Config)
-}
-
-func mergeLists(a, b []string) []string {
- out := make([]string, 0, len(a)+len(b))
- for _, el := range b {
- if el == "inherit" {
- out = append(out, a...)
- } else {
- out = append(out, el)
- }
- }
-
- return out
-}
-
-func normalizeList(list []string) []string {
- if len(list) > 1 {
- nlist := make([]string, 0, len(list))
- nlist = append(nlist, list[0])
- for i, el := range list[1:] {
- if el != list[i] {
- nlist = append(nlist, el)
- }
- }
- list = nlist
- }
-
- for _, el := range list {
- if el == "inherit" {
- // This should never happen, because the default config
- // should not use "inherit"
- panic(`unresolved "inherit"`)
- }
- }
-
- return list
-}
-
-func (cfg Config) Merge(ocfg Config) Config {
- if ocfg.Checks != nil {
- cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks)
- }
- if ocfg.Initialisms != nil {
- cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms)
- }
- if ocfg.DotImportWhitelist != nil {
- cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist)
- }
- if ocfg.HTTPStatusCodeWhitelist != nil {
- cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist)
- }
- return cfg
-}
-
-type Config struct {
- // TODO(dh): this implementation makes it impossible for external
- // clients to add their own checkers with configuration. At the
- // moment, we don't really care about that; we don't encourage
- // that people use this package. In the future, we may. The
- // obvious solution would be using map[string]interface{}, but
- // that's obviously subpar.
-
- Checks []string `toml:"checks"`
- Initialisms []string `toml:"initialisms"`
- DotImportWhitelist []string `toml:"dot_import_whitelist"`
- HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"`
-}
-
-func (c Config) String() string {
- buf := &bytes.Buffer{}
-
- fmt.Fprintf(buf, "Checks: %#v\n", c.Checks)
- fmt.Fprintf(buf, "Initialisms: %#v\n", c.Initialisms)
- fmt.Fprintf(buf, "DotImportWhitelist: %#v\n", c.DotImportWhitelist)
- fmt.Fprintf(buf, "HTTPStatusCodeWhitelist: %#v", c.HTTPStatusCodeWhitelist)
-
- return buf.String()
-}
-
-var DefaultConfig = Config{
- Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022"},
- Initialisms: []string{
- "ACL", "API", "ASCII", "CPU", "CSS", "DNS",
- "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
- "IP", "JSON", "QPS", "RAM", "RPC", "SLA",
- "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
- "UDP", "UI", "GID", "UID", "UUID", "URI",
- "URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
- "XSS", "SIP", "RTP", "AMQP", "DB", "TS",
- },
- DotImportWhitelist: []string{},
- HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"},
-}
-
-const ConfigName = "staticcheck.conf"
-
-func parseConfigs(dir string) ([]Config, error) {
- var out []Config
-
- // TODO(dh): consider stopping at the GOPATH/module boundary
- for dir != "" {
- f, err := os.Open(filepath.Join(dir, ConfigName))
- if os.IsNotExist(err) {
- ndir := filepath.Dir(dir)
- if ndir == dir {
- break
- }
- dir = ndir
- continue
- }
- if err != nil {
- return nil, err
- }
- var cfg Config
- _, err = toml.DecodeReader(f, &cfg)
- f.Close()
- if err != nil {
- return nil, err
- }
- out = append(out, cfg)
- ndir := filepath.Dir(dir)
- if ndir == dir {
- break
- }
- dir = ndir
- }
- out = append(out, DefaultConfig)
- if len(out) < 2 {
- return out, nil
- }
- for i := 0; i < len(out)/2; i++ {
- out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i]
- }
- return out, nil
-}
-
-func mergeConfigs(confs []Config) Config {
- if len(confs) == 0 {
- // This shouldn't happen because we always have at least a
- // default config.
- panic("trying to merge zero configs")
- }
- if len(confs) == 1 {
- return confs[0]
- }
- conf := confs[0]
- for _, oconf := range confs[1:] {
- conf = conf.Merge(oconf)
- }
- return conf
-}
-
-func Load(dir string) (Config, error) {
- confs, err := parseConfigs(dir)
- if err != nil {
- return Config{}, err
- }
- conf := mergeConfigs(confs)
-
- conf.Checks = normalizeList(conf.Checks)
- conf.Initialisms = normalizeList(conf.Initialisms)
- conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist)
- conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist)
-
- return conf, nil
-}
diff --git a/vendor/honnef.co/go/tools/config/example.conf b/vendor/honnef.co/go/tools/config/example.conf
deleted file mode 100644
index a715a24d4fc..00000000000
--- a/vendor/honnef.co/go/tools/config/example.conf
+++ /dev/null
@@ -1,10 +0,0 @@
-checks = ["all", "-ST1003", "-ST1014"]
-initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS",
- "EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
- "IP", "JSON", "QPS", "RAM", "RPC", "SLA",
- "SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
- "UDP", "UI", "GID", "UID", "UUID", "URI",
- "URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
- "XSS", "SIP", "RTP"]
-dot_import_whitelist = []
-http_status_code_whitelist = ["200", "400", "404", "500"]
diff --git a/vendor/honnef.co/go/tools/deprecated/stdlib.go b/vendor/honnef.co/go/tools/deprecated/stdlib.go
deleted file mode 100644
index cabb8500a25..00000000000
--- a/vendor/honnef.co/go/tools/deprecated/stdlib.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package deprecated
-
-type Deprecation struct {
- DeprecatedSince int
- AlternativeAvailableSince int
-}
-
-var Stdlib = map[string]Deprecation{
- // FIXME(dh): AllowBinary isn't being detected as deprecated
- // because the comment has a newline right after "Deprecated:"
- "go/build.AllowBinary": {7, 7},
- "(archive/zip.FileHeader).CompressedSize": {1, 1},
- "(archive/zip.FileHeader).UncompressedSize": {1, 1},
- "(archive/zip.FileHeader).ModifiedTime": {10, 10},
- "(archive/zip.FileHeader).ModifiedDate": {10, 10},
- "(*archive/zip.FileHeader).ModTime": {10, 10},
- "(*archive/zip.FileHeader).SetModTime": {10, 10},
- "(go/doc.Package).Bugs": {1, 1},
- "os.SEEK_SET": {7, 7},
- "os.SEEK_CUR": {7, 7},
- "os.SEEK_END": {7, 7},
- "(net.Dialer).Cancel": {7, 7},
- "runtime.CPUProfile": {9, 0},
- "compress/flate.ReadError": {6, 6},
- "compress/flate.WriteError": {6, 6},
- "path/filepath.HasPrefix": {0, 0},
- "(net/http.Transport).Dial": {7, 7},
- "(*net/http.Transport).CancelRequest": {6, 5},
- "net/http.ErrWriteAfterFlush": {7, 0},
- "net/http.ErrHeaderTooLong": {8, 0},
- "net/http.ErrShortBody": {8, 0},
- "net/http.ErrMissingContentLength": {8, 0},
- "net/http/httputil.ErrPersistEOF": {0, 0},
- "net/http/httputil.ErrClosed": {0, 0},
- "net/http/httputil.ErrPipeline": {0, 0},
- "net/http/httputil.ServerConn": {0, 0},
- "net/http/httputil.NewServerConn": {0, 0},
- "net/http/httputil.ClientConn": {0, 0},
- "net/http/httputil.NewClientConn": {0, 0},
- "net/http/httputil.NewProxyClientConn": {0, 0},
- "(net/http.Request).Cancel": {7, 7},
- "(text/template/parse.PipeNode).Line": {1, 1},
- "(text/template/parse.ActionNode).Line": {1, 1},
- "(text/template/parse.BranchNode).Line": {1, 1},
- "(text/template/parse.TemplateNode).Line": {1, 1},
- "database/sql/driver.ColumnConverter": {9, 9},
- "database/sql/driver.Execer": {8, 8},
- "database/sql/driver.Queryer": {8, 8},
- "(database/sql/driver.Conn).Begin": {8, 8},
- "(database/sql/driver.Stmt).Exec": {8, 8},
- "(database/sql/driver.Stmt).Query": {8, 8},
- "syscall.StringByteSlice": {1, 1},
- "syscall.StringBytePtr": {1, 1},
- "syscall.StringSlicePtr": {1, 1},
- "syscall.StringToUTF16": {1, 1},
- "syscall.StringToUTF16Ptr": {1, 1},
- "(*regexp.Regexp).Copy": {12, 12},
- "(archive/tar.Header).Xattrs": {10, 10},
- "archive/tar.TypeRegA": {11, 1},
- "go/types.NewInterface": {11, 11},
- "(*go/types.Interface).Embedded": {11, 11},
- "go/importer.For": {12, 12},
- "encoding/json.InvalidUTF8Error": {2, 2},
- "encoding/json.UnmarshalFieldError": {2, 2},
- "encoding/csv.ErrTrailingComma": {2, 2},
- "(encoding/csv.Reader).TrailingComma": {2, 2},
- "(net.Dialer).DualStack": {12, 12},
- "net/http.ErrUnexpectedTrailer": {12, 12},
- "net/http.CloseNotifier": {11, 7},
- "net/http.ProtocolError": {8, 8},
- "(crypto/x509.CertificateRequest).Attributes": {5, 3},
- // This function has no alternative, but also no purpose.
- "(*crypto/rc4.Cipher).Reset": {12, 0},
- "(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7},
- "image.ZP": {13, 0},
- "image.ZR": {13, 0},
- "(*debug/gosym.LineTable).LineToPC": {2, 2},
- "(*debug/gosym.LineTable).PCToLine": {2, 2},
- "crypto/tls.VersionSSL30": {13, 0},
- "(crypto/tls.Config).NameToCertificate": {14, 14},
- "(*crypto/tls.Config).BuildNameToCertificate": {14, 14},
- "image/jpeg.Reader": {4, 0},
-
- // All of these have been deprecated in favour of external libraries
- "syscall.AttachLsf": {7, 0},
- "syscall.DetachLsf": {7, 0},
- "syscall.LsfSocket": {7, 0},
- "syscall.SetLsfPromisc": {7, 0},
- "syscall.LsfJump": {7, 0},
- "syscall.LsfStmt": {7, 0},
- "syscall.BpfStmt": {7, 0},
- "syscall.BpfJump": {7, 0},
- "syscall.BpfBuflen": {7, 0},
- "syscall.SetBpfBuflen": {7, 0},
- "syscall.BpfDatalink": {7, 0},
- "syscall.SetBpfDatalink": {7, 0},
- "syscall.SetBpfPromisc": {7, 0},
- "syscall.FlushBpf": {7, 0},
- "syscall.BpfInterface": {7, 0},
- "syscall.SetBpfInterface": {7, 0},
- "syscall.BpfTimeout": {7, 0},
- "syscall.SetBpfTimeout": {7, 0},
- "syscall.BpfStats": {7, 0},
- "syscall.SetBpfImmediate": {7, 0},
- "syscall.SetBpf": {7, 0},
- "syscall.CheckBpfVersion": {7, 0},
- "syscall.BpfHeadercmpl": {7, 0},
- "syscall.SetBpfHeadercmpl": {7, 0},
- "syscall.RouteRIB": {8, 0},
- "syscall.RoutingMessage": {8, 0},
- "syscall.RouteMessage": {8, 0},
- "syscall.InterfaceMessage": {8, 0},
- "syscall.InterfaceAddrMessage": {8, 0},
- "syscall.ParseRoutingMessage": {8, 0},
- "syscall.ParseRoutingSockaddr": {8, 0},
- "syscall.InterfaceAnnounceMessage": {7, 0},
- "syscall.InterfaceMulticastAddrMessage": {7, 0},
- "syscall.FormatMessage": {5, 0},
-}
diff --git a/vendor/honnef.co/go/tools/edit/edit.go b/vendor/honnef.co/go/tools/edit/edit.go
deleted file mode 100644
index f4cfba2347c..00000000000
--- a/vendor/honnef.co/go/tools/edit/edit.go
+++ /dev/null
@@ -1,67 +0,0 @@
-package edit
-
-import (
- "bytes"
- "go/ast"
- "go/format"
- "go/token"
-
- "golang.org/x/tools/go/analysis"
- "honnef.co/go/tools/pattern"
-)
-
-type Ranger interface {
- Pos() token.Pos
- End() token.Pos
-}
-
-type Range [2]token.Pos
-
-func (r Range) Pos() token.Pos { return r[0] }
-func (r Range) End() token.Pos { return r[1] }
-
-func ReplaceWithString(fset *token.FileSet, old Ranger, new string) analysis.TextEdit {
- return analysis.TextEdit{
- Pos: old.Pos(),
- End: old.End(),
- NewText: []byte(new),
- }
-}
-
-func ReplaceWithNode(fset *token.FileSet, old Ranger, new ast.Node) analysis.TextEdit {
- buf := &bytes.Buffer{}
- if err := format.Node(buf, fset, new); err != nil {
- panic("internal error: " + err.Error())
- }
- return analysis.TextEdit{
- Pos: old.Pos(),
- End: old.End(),
- NewText: buf.Bytes(),
- }
-}
-
-func ReplaceWithPattern(pass *analysis.Pass, after pattern.Pattern, state pattern.State, node Ranger) analysis.TextEdit {
- r := pattern.NodeToAST(after.Root, state)
- buf := &bytes.Buffer{}
- format.Node(buf, pass.Fset, r)
- return analysis.TextEdit{
- Pos: node.Pos(),
- End: node.End(),
- NewText: buf.Bytes(),
- }
-}
-
-func Delete(old Ranger) analysis.TextEdit {
- return analysis.TextEdit{
- Pos: old.Pos(),
- End: old.End(),
- NewText: nil,
- }
-}
-
-func Fix(msg string, edits ...analysis.TextEdit) analysis.SuggestedFix {
- return analysis.SuggestedFix{
- Message: msg,
- TextEdits: edits,
- }
-}
diff --git a/vendor/honnef.co/go/tools/facts/deprecated.go b/vendor/honnef.co/go/tools/facts/deprecated.go
deleted file mode 100644
index 8587b0e0eae..00000000000
--- a/vendor/honnef.co/go/tools/facts/deprecated.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package facts
-
-import (
- "go/ast"
- "go/token"
- "go/types"
- "reflect"
- "strings"
-
- "golang.org/x/tools/go/analysis"
-)
-
-type IsDeprecated struct{ Msg string }
-
-func (*IsDeprecated) AFact() {}
-func (d *IsDeprecated) String() string { return "Deprecated: " + d.Msg }
-
-type DeprecatedResult struct {
- Objects map[types.Object]*IsDeprecated
- Packages map[*types.Package]*IsDeprecated
-}
-
-var Deprecated = &analysis.Analyzer{
- Name: "fact_deprecated",
- Doc: "Mark deprecated objects",
- Run: deprecated,
- FactTypes: []analysis.Fact{(*IsDeprecated)(nil)},
- ResultType: reflect.TypeOf(DeprecatedResult{}),
-}
-
-func deprecated(pass *analysis.Pass) (interface{}, error) {
- var names []*ast.Ident
-
- extractDeprecatedMessage := func(docs []*ast.CommentGroup) string {
- for _, doc := range docs {
- if doc == nil {
- continue
- }
- parts := strings.Split(doc.Text(), "\n\n")
- last := parts[len(parts)-1]
- if !strings.HasPrefix(last, "Deprecated: ") {
- continue
- }
- alt := last[len("Deprecated: "):]
- alt = strings.Replace(alt, "\n", " ", -1)
- return alt
- }
- return ""
- }
- doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) {
- alt := extractDeprecatedMessage(docs)
- if alt == "" {
- return
- }
-
- for _, name := range names {
- obj := pass.TypesInfo.ObjectOf(name)
- pass.ExportObjectFact(obj, &IsDeprecated{alt})
- }
- }
-
- var docs []*ast.CommentGroup
- for _, f := range pass.Files {
- docs = append(docs, f.Doc)
- }
- if alt := extractDeprecatedMessage(docs); alt != "" {
- // Don't mark package syscall as deprecated, even though
- // it is. A lot of people still use it for simple
- // constants like SIGKILL, and I am not comfortable
- // telling them to use x/sys for that.
- if pass.Pkg.Path() != "syscall" {
- pass.ExportPackageFact(&IsDeprecated{alt})
- }
- }
-
- docs = docs[:0]
- for _, f := range pass.Files {
- fn := func(node ast.Node) bool {
- if node == nil {
- return true
- }
- var ret bool
- switch node := node.(type) {
- case *ast.GenDecl:
- switch node.Tok {
- case token.TYPE, token.CONST, token.VAR:
- docs = append(docs, node.Doc)
- return true
- default:
- return false
- }
- case *ast.FuncDecl:
- docs = append(docs, node.Doc)
- names = []*ast.Ident{node.Name}
- ret = false
- case *ast.TypeSpec:
- docs = append(docs, node.Doc)
- names = []*ast.Ident{node.Name}
- ret = true
- case *ast.ValueSpec:
- docs = append(docs, node.Doc)
- names = node.Names
- ret = false
- case *ast.File:
- return true
- case *ast.StructType:
- for _, field := range node.Fields.List {
- doDocs(field.Names, []*ast.CommentGroup{field.Doc})
- }
- return false
- case *ast.InterfaceType:
- for _, field := range node.Methods.List {
- doDocs(field.Names, []*ast.CommentGroup{field.Doc})
- }
- return false
- default:
- return false
- }
- if len(names) == 0 || len(docs) == 0 {
- return ret
- }
- doDocs(names, docs)
-
- docs = docs[:0]
- names = nil
- return ret
- }
- ast.Inspect(f, fn)
- }
-
- out := DeprecatedResult{
- Objects: map[types.Object]*IsDeprecated{},
- Packages: map[*types.Package]*IsDeprecated{},
- }
-
- for _, fact := range pass.AllObjectFacts() {
- out.Objects[fact.Object] = fact.Fact.(*IsDeprecated)
- }
- for _, fact := range pass.AllPackageFacts() {
- out.Packages[fact.Package] = fact.Fact.(*IsDeprecated)
- }
-
- return out, nil
-}
diff --git a/vendor/honnef.co/go/tools/facts/generated.go b/vendor/honnef.co/go/tools/facts/generated.go
deleted file mode 100644
index 18cbb49bd9a..00000000000
--- a/vendor/honnef.co/go/tools/facts/generated.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package facts
-
-import (
- "bufio"
- "bytes"
- "io"
- "os"
- "reflect"
- "strings"
-
- "golang.org/x/tools/go/analysis"
-)
-
-type Generator int
-
-// A list of known generators we can detect
-const (
- Unknown Generator = iota
- Goyacc
- Cgo
- Stringer
- ProtocGenGo
-)
-
-var (
- // used by cgo before Go 1.11
- oldCgo = []byte("// Created by cgo - DO NOT EDIT")
- prefix = []byte("// Code generated ")
- suffix = []byte(" DO NOT EDIT.")
- nl = []byte("\n")
- crnl = []byte("\r\n")
-)
-
-func isGenerated(path string) (Generator, bool) {
- f, err := os.Open(path)
- if err != nil {
- return 0, false
- }
- defer f.Close()
- br := bufio.NewReader(f)
- for {
- s, err := br.ReadBytes('\n')
- if err != nil && err != io.EOF {
- return 0, false
- }
- s = bytes.TrimSuffix(s, crnl)
- s = bytes.TrimSuffix(s, nl)
- if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
- text := string(s[len(prefix) : len(s)-len(suffix)])
- switch text {
- case "by goyacc.":
- return Goyacc, true
- case "by cmd/cgo;":
- return Cgo, true
- case "by protoc-gen-go.":
- return ProtocGenGo, true
- }
- if strings.HasPrefix(text, `by "stringer `) {
- return Stringer, true
- }
- if strings.HasPrefix(text, `by goyacc `) {
- return Goyacc, true
- }
-
- return Unknown, true
- }
- if bytes.Equal(s, oldCgo) {
- return Cgo, true
- }
- if err == io.EOF {
- break
- }
- }
- return 0, false
-}
-
-var Generated = &analysis.Analyzer{
- Name: "isgenerated",
- Doc: "annotate file names that have been code generated",
- Run: func(pass *analysis.Pass) (interface{}, error) {
- m := map[string]Generator{}
- for _, f := range pass.Files {
- path := pass.Fset.PositionFor(f.Pos(), false).Filename
- g, ok := isGenerated(path)
- if ok {
- m[path] = g
- }
- }
- return m, nil
- },
- RunDespiteErrors: true,
- ResultType: reflect.TypeOf(map[string]Generator{}),
-}
diff --git a/vendor/honnef.co/go/tools/facts/purity.go b/vendor/honnef.co/go/tools/facts/purity.go
deleted file mode 100644
index 099ee23e3b8..00000000000
--- a/vendor/honnef.co/go/tools/facts/purity.go
+++ /dev/null
@@ -1,177 +0,0 @@
-package facts
-
-import (
- "go/types"
- "reflect"
-
- "golang.org/x/tools/go/analysis"
- "honnef.co/go/tools/functions"
- "honnef.co/go/tools/internal/passes/buildir"
- "honnef.co/go/tools/ir"
-)
-
-type IsPure struct{}
-
-func (*IsPure) AFact() {}
-func (d *IsPure) String() string { return "is pure" }
-
-type PurityResult map[*types.Func]*IsPure
-
-var Purity = &analysis.Analyzer{
- Name: "fact_purity",
- Doc: "Mark pure functions",
- Run: purity,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- FactTypes: []analysis.Fact{(*IsPure)(nil)},
- ResultType: reflect.TypeOf(PurityResult{}),
-}
-
-var pureStdlib = map[string]struct{}{
- "errors.New": {},
- "fmt.Errorf": {},
- "fmt.Sprintf": {},
- "fmt.Sprint": {},
- "sort.Reverse": {},
- "strings.Map": {},
- "strings.Repeat": {},
- "strings.Replace": {},
- "strings.Title": {},
- "strings.ToLower": {},
- "strings.ToLowerSpecial": {},
- "strings.ToTitle": {},
- "strings.ToTitleSpecial": {},
- "strings.ToUpper": {},
- "strings.ToUpperSpecial": {},
- "strings.Trim": {},
- "strings.TrimFunc": {},
- "strings.TrimLeft": {},
- "strings.TrimLeftFunc": {},
- "strings.TrimPrefix": {},
- "strings.TrimRight": {},
- "strings.TrimRightFunc": {},
- "strings.TrimSpace": {},
- "strings.TrimSuffix": {},
- "(*net/http.Request).WithContext": {},
-}
-
-func purity(pass *analysis.Pass) (interface{}, error) {
- seen := map[*ir.Function]struct{}{}
- irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg
- var check func(fn *ir.Function) (ret bool)
- check = func(fn *ir.Function) (ret bool) {
- if fn.Object() == nil {
- // TODO(dh): support closures
- return false
- }
- if pass.ImportObjectFact(fn.Object(), new(IsPure)) {
- return true
- }
- if fn.Pkg != irpkg {
- // Function is in another package but wasn't marked as
- // pure, ergo it isn't pure
- return false
- }
- // Break recursion
- if _, ok := seen[fn]; ok {
- return false
- }
-
- seen[fn] = struct{}{}
- defer func() {
- if ret {
- pass.ExportObjectFact(fn.Object(), &IsPure{})
- }
- }()
-
- if functions.IsStub(fn) {
- return false
- }
-
- if _, ok := pureStdlib[fn.Object().(*types.Func).FullName()]; ok {
- return true
- }
-
- if fn.Signature.Results().Len() == 0 {
- // A function with no return values is empty or is doing some
- // work we cannot see (for example because of build tags);
- // don't consider it pure.
- return false
- }
-
- for _, param := range fn.Params {
- // TODO(dh): this may not be strictly correct. pure code
- // can, to an extent, operate on non-basic types.
- if _, ok := param.Type().Underlying().(*types.Basic); !ok {
- return false
- }
- }
-
- // Don't consider external functions pure.
- if fn.Blocks == nil {
- return false
- }
- checkCall := func(common *ir.CallCommon) bool {
- if common.IsInvoke() {
- return false
- }
- builtin, ok := common.Value.(*ir.Builtin)
- if !ok {
- if common.StaticCallee() != fn {
- if common.StaticCallee() == nil {
- return false
- }
- if !check(common.StaticCallee()) {
- return false
- }
- }
- } else {
- switch builtin.Name() {
- case "len", "cap":
- default:
- return false
- }
- }
- return true
- }
- for _, b := range fn.Blocks {
- for _, ins := range b.Instrs {
- switch ins := ins.(type) {
- case *ir.Call:
- if !checkCall(ins.Common()) {
- return false
- }
- case *ir.Defer:
- if !checkCall(&ins.Call) {
- return false
- }
- case *ir.Select:
- return false
- case *ir.Send:
- return false
- case *ir.Go:
- return false
- case *ir.Panic:
- return false
- case *ir.Store:
- return false
- case *ir.FieldAddr:
- return false
- case *ir.Alloc:
- return false
- case *ir.Load:
- return false
- }
- }
- }
- return true
- }
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- check(fn)
- }
-
- out := PurityResult{}
- for _, fact := range pass.AllObjectFacts() {
- out[fact.Object.(*types.Func)] = fact.Fact.(*IsPure)
- }
- return out, nil
-}
diff --git a/vendor/honnef.co/go/tools/facts/token.go b/vendor/honnef.co/go/tools/facts/token.go
deleted file mode 100644
index 26e76ff73d5..00000000000
--- a/vendor/honnef.co/go/tools/facts/token.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package facts
-
-import (
- "go/ast"
- "go/token"
- "reflect"
-
- "golang.org/x/tools/go/analysis"
-)
-
-var TokenFile = &analysis.Analyzer{
- Name: "tokenfileanalyzer",
- Doc: "creates a mapping of *token.File to *ast.File",
- Run: func(pass *analysis.Pass) (interface{}, error) {
- m := map[*token.File]*ast.File{}
- for _, af := range pass.Files {
- tf := pass.Fset.File(af.Pos())
- m[tf] = af
- }
- return m, nil
- },
- RunDespiteErrors: true,
- ResultType: reflect.TypeOf(map[*token.File]*ast.File{}),
-}
diff --git a/vendor/honnef.co/go/tools/functions/loops.go b/vendor/honnef.co/go/tools/functions/loops.go
deleted file mode 100644
index a8af7010084..00000000000
--- a/vendor/honnef.co/go/tools/functions/loops.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package functions
-
-import "honnef.co/go/tools/ir"
-
-type Loop struct{ *ir.BlockSet }
-
-func FindLoops(fn *ir.Function) []Loop {
- if fn.Blocks == nil {
- return nil
- }
- tree := fn.DomPreorder()
- var sets []Loop
- for _, h := range tree {
- for _, n := range h.Preds {
- if !h.Dominates(n) {
- continue
- }
- // n is a back-edge to h
- // h is the loop header
- if n == h {
- set := Loop{ir.NewBlockSet(len(fn.Blocks))}
- set.Add(n)
- sets = append(sets, set)
- continue
- }
- set := Loop{ir.NewBlockSet(len(fn.Blocks))}
- set.Add(h)
- set.Add(n)
- for _, b := range allPredsBut(n, h, nil) {
- set.Add(b)
- }
- sets = append(sets, set)
- }
- }
- return sets
-}
-
-func allPredsBut(b, but *ir.BasicBlock, list []*ir.BasicBlock) []*ir.BasicBlock {
-outer:
- for _, pred := range b.Preds {
- if pred == but {
- continue
- }
- for _, p := range list {
- // TODO improve big-o complexity of this function
- if pred == p {
- continue outer
- }
- }
- list = append(list, pred)
- list = allPredsBut(pred, but, list)
- }
- return list
-}
diff --git a/vendor/honnef.co/go/tools/functions/stub.go b/vendor/honnef.co/go/tools/functions/stub.go
deleted file mode 100644
index 4d5de10b85c..00000000000
--- a/vendor/honnef.co/go/tools/functions/stub.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package functions
-
-import (
- "honnef.co/go/tools/ir"
-)
-
-// IsStub reports whether a function is a stub. A function is
-// considered a stub if it has no instructions or if all it does is
-// return a constant value.
-func IsStub(fn *ir.Function) bool {
- for _, b := range fn.Blocks {
- for _, instr := range b.Instrs {
- switch instr.(type) {
- case *ir.Const:
- // const naturally has no side-effects
- case *ir.Panic:
- // panic is a stub if it only uses constants
- case *ir.Return:
- // return is a stub if it only uses constants
- case *ir.DebugRef:
- case *ir.Jump:
- // if there are no disallowed instructions, then we're
- // only jumping to the exit block (or possibly
- // somewhere else that's stubby?)
- default:
- // all other instructions are assumed to do actual work
- return false
- }
- }
- }
- return true
-}
diff --git a/vendor/honnef.co/go/tools/functions/terminates.go b/vendor/honnef.co/go/tools/functions/terminates.go
deleted file mode 100644
index c4984673f64..00000000000
--- a/vendor/honnef.co/go/tools/functions/terminates.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package functions
-
-import (
- "go/types"
-
- "honnef.co/go/tools/ir"
-)
-
-// Terminates reports whether fn is supposed to return, that is if it
-// has at least one theoretic path that returns from the function.
-// Explicit panics do not count as terminating.
-func Terminates(fn *ir.Function) bool {
- if fn.Blocks == nil {
- // assuming that a function terminates is the conservative
- // choice
- return true
- }
-
- for _, block := range fn.Blocks {
- if _, ok := block.Control().(*ir.Return); ok {
- if len(block.Preds) == 0 {
- return true
- }
- for _, pred := range block.Preds {
- switch ctrl := pred.Control().(type) {
- case *ir.Panic:
- // explicit panics do not count as terminating
- case *ir.If:
- // Check if we got here by receiving from a closed
- // time.Tick channel – this cannot happen at
- // runtime and thus doesn't constitute termination
- iff := ctrl
- if !ok {
- return true
- }
- ex, ok := iff.Cond.(*ir.Extract)
- if !ok {
- return true
- }
- if ex.Index != 1 {
- return true
- }
- recv, ok := ex.Tuple.(*ir.Recv)
- if !ok {
- return true
- }
- call, ok := recv.Chan.(*ir.Call)
- if !ok {
- return true
- }
- fn, ok := call.Common().Value.(*ir.Function)
- if !ok {
- return true
- }
- fn2, ok := fn.Object().(*types.Func)
- if !ok {
- return true
- }
- if fn2.FullName() != "time.Tick" {
- return true
- }
- default:
- // we've reached the exit block
- return true
- }
- }
- }
- }
- return false
-}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/callee.go b/vendor/honnef.co/go/tools/go/types/typeutil/callee.go
deleted file mode 100644
index 38f596daf9e..00000000000
--- a/vendor/honnef.co/go/tools/go/types/typeutil/callee.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-import (
- "go/ast"
- "go/types"
-
- "golang.org/x/tools/go/ast/astutil"
-)
-
-// Callee returns the named target of a function call, if any:
-// a function, method, builtin, or variable.
-func Callee(info *types.Info, call *ast.CallExpr) types.Object {
- var obj types.Object
- switch fun := astutil.Unparen(call.Fun).(type) {
- case *ast.Ident:
- obj = info.Uses[fun] // type, var, builtin, or declared func
- case *ast.SelectorExpr:
- if sel, ok := info.Selections[fun]; ok {
- obj = sel.Obj() // method or field
- } else {
- obj = info.Uses[fun.Sel] // qualified identifier?
- }
- }
- if _, ok := obj.(*types.TypeName); ok {
- return nil // T(x) is a conversion, not a call
- }
- return obj
-}
-
-// StaticCallee returns the target (function or method) of a static
-// function call, if any. It returns nil for calls to builtins.
-func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
- if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
- return f
- }
- return nil
-}
-
-func interfaceMethod(f *types.Func) bool {
- recv := f.Type().(*types.Signature).Recv()
- return recv != nil && types.IsInterface(recv.Type())
-}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/identical.go b/vendor/honnef.co/go/tools/go/types/typeutil/identical.go
deleted file mode 100644
index c0ca441c327..00000000000
--- a/vendor/honnef.co/go/tools/go/types/typeutil/identical.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package typeutil
-
-import (
- "go/types"
-)
-
-// Identical reports whether x and y are identical types.
-// Unlike types.Identical, receivers of Signature types are not ignored.
-// Unlike types.Identical, interfaces are compared via pointer equality (except for the empty interface, which gets deduplicated).
-// Unlike types.Identical, structs are compared via pointer equality.
-func Identical(x, y types.Type) (ret bool) {
- if !types.Identical(x, y) {
- return false
- }
-
- switch x := x.(type) {
- case *types.Struct:
- y, ok := y.(*types.Struct)
- if !ok {
- // should be impossible
- return true
- }
- return x == y
- case *types.Interface:
- // The issue with interfaces, typeutil.Map and types.Identical
- //
- // types.Identical, when comparing two interfaces, only looks at the set
- // of all methods, not differentiating between implicit (embedded) and
- // explicit methods.
- //
- // When we see the following two types, in source order
- //
- // type I1 interface { foo() }
- // type I2 interface { I1 }
- //
- // then we will first correctly process I1 and its underlying type. When
- // we get to I2, we will see that its underlying type is identical to
- // that of I1 and not process it again. This, however, means that we will
- // not record the fact that I2 embeds I1. If only I2 is reachable via the
- // graph root, then I1 will not be considered used.
- //
- // We choose to be lazy and compare interfaces by their
- // pointers. This will obviously miss identical interfaces,
- // but this only has a runtime cost, it doesn't affect
- // correctness.
- y, ok := y.(*types.Interface)
- if !ok {
- // should be impossible
- return true
- }
- if x.NumEmbeddeds() == 0 &&
- y.NumEmbeddeds() == 0 &&
- x.NumMethods() == 0 &&
- y.NumMethods() == 0 {
- // all truly empty interfaces are the same
- return true
- }
- return x == y
- case *types.Signature:
- y, ok := y.(*types.Signature)
- if !ok {
- // should be impossible
- return true
- }
- if x.Recv() == y.Recv() {
- return true
- }
- if x.Recv() == nil || y.Recv() == nil {
- return false
- }
- return Identical(x.Recv().Type(), y.Recv().Type())
- default:
- return true
- }
-}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/imports.go b/vendor/honnef.co/go/tools/go/types/typeutil/imports.go
deleted file mode 100644
index 9c441dba9c0..00000000000
--- a/vendor/honnef.co/go/tools/go/types/typeutil/imports.go
+++ /dev/null
@@ -1,31 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-import "go/types"
-
-// Dependencies returns all dependencies of the specified packages.
-//
-// Dependent packages appear in topological order: if package P imports
-// package Q, Q appears earlier than P in the result.
-// The algorithm follows import statements in the order they
-// appear in the source code, so the result is a total order.
-//
-func Dependencies(pkgs ...*types.Package) []*types.Package {
- var result []*types.Package
- seen := make(map[*types.Package]bool)
- var visit func(pkgs []*types.Package)
- visit = func(pkgs []*types.Package) {
- for _, p := range pkgs {
- if !seen[p] {
- seen[p] = true
- visit(p.Imports())
- result = append(result, p)
- }
- }
- }
- visit(pkgs)
- return result
-}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/map.go b/vendor/honnef.co/go/tools/go/types/typeutil/map.go
deleted file mode 100644
index f929353ccbd..00000000000
--- a/vendor/honnef.co/go/tools/go/types/typeutil/map.go
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package typeutil defines various utilities for types, such as Map,
-// a mapping from types.Type to interface{} values.
-package typeutil
-
-import (
- "bytes"
- "fmt"
- "go/types"
- "reflect"
-)
-
-// Map is a hash-table-based mapping from types (types.Type) to
-// arbitrary interface{} values. The concrete types that implement
-// the Type interface are pointers. Since they are not canonicalized,
-// == cannot be used to check for equivalence, and thus we cannot
-// simply use a Go map.
-//
-// Just as with map[K]V, a nil *Map is a valid empty map.
-//
-// Not thread-safe.
-//
-// This fork handles Signatures correctly, respecting method
-// receivers. Furthermore, it doesn't deduplicate interfaces or
-// structs. Interfaces aren't deduplicated as not to conflate implicit
-// and explicit methods. Structs aren't deduplicated because we track
-// fields of each type separately.
-//
-type Map struct {
- hasher Hasher // shared by many Maps
- table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
- length int // number of map entries
-}
-
-// entry is an entry (key/value association) in a hash bucket.
-type entry struct {
- key types.Type
- value interface{}
-}
-
-// SetHasher sets the hasher used by Map.
-//
-// All Hashers are functionally equivalent but contain internal state
-// used to cache the results of hashing previously seen types.
-//
-// A single Hasher created by MakeHasher() may be shared among many
-// Maps. This is recommended if the instances have many keys in
-// common, as it will amortize the cost of hash computation.
-//
-// A Hasher may grow without bound as new types are seen. Even when a
-// type is deleted from the map, the Hasher never shrinks, since other
-// types in the map may reference the deleted type indirectly.
-//
-// Hashers are not thread-safe, and read-only operations such as
-// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
-// read-lock) is require around all Map operations if a shared
-// hasher is accessed from multiple threads.
-//
-// If SetHasher is not called, the Map will create a private hasher at
-// the first call to Insert.
-//
-func (m *Map) SetHasher(hasher Hasher) {
- m.hasher = hasher
-}
-
-// Delete removes the entry with the given key, if any.
-// It returns true if the entry was found.
-//
-func (m *Map) Delete(key types.Type) bool {
- if m != nil && m.table != nil {
- hash := m.hasher.Hash(key)
- bucket := m.table[hash]
- for i, e := range bucket {
- if e.key != nil && Identical(key, e.key) {
- // We can't compact the bucket as it
- // would disturb iterators.
- bucket[i] = entry{}
- m.length--
- return true
- }
- }
- }
- return false
-}
-
-// At returns the map entry for the given key.
-// The result is nil if the entry is not present.
-//
-func (m *Map) At(key types.Type) interface{} {
- if m != nil && m.table != nil {
- for _, e := range m.table[m.hasher.Hash(key)] {
- if e.key != nil && Identical(key, e.key) {
- return e.value
- }
- }
- }
- return nil
-}
-
-// Set sets the map entry for key to val,
-// and returns the previous entry, if any.
-func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) {
- if m.table != nil {
- hash := m.hasher.Hash(key)
- bucket := m.table[hash]
- var hole *entry
- for i, e := range bucket {
- if e.key == nil {
- hole = &bucket[i]
- } else if Identical(key, e.key) {
- prev = e.value
- bucket[i].value = value
- return
- }
- }
-
- if hole != nil {
- *hole = entry{key, value} // overwrite deleted entry
- } else {
- m.table[hash] = append(bucket, entry{key, value})
- }
- } else {
- if m.hasher.memo == nil {
- m.hasher = MakeHasher()
- }
- hash := m.hasher.Hash(key)
- m.table = map[uint32][]entry{hash: {entry{key, value}}}
- }
-
- m.length++
- return
-}
-
-// Len returns the number of map entries.
-func (m *Map) Len() int {
- if m != nil {
- return m.length
- }
- return 0
-}
-
-// Iterate calls function f on each entry in the map in unspecified order.
-//
-// If f should mutate the map, Iterate provides the same guarantees as
-// Go maps: if f deletes a map entry that Iterate has not yet reached,
-// f will not be invoked for it, but if f inserts a map entry that
-// Iterate has not yet reached, whether or not f will be invoked for
-// it is unspecified.
-//
-func (m *Map) Iterate(f func(key types.Type, value interface{})) {
- if m != nil {
- for _, bucket := range m.table {
- for _, e := range bucket {
- if e.key != nil {
- f(e.key, e.value)
- }
- }
- }
- }
-}
-
-// Keys returns a new slice containing the set of map keys.
-// The order is unspecified.
-func (m *Map) Keys() []types.Type {
- keys := make([]types.Type, 0, m.Len())
- m.Iterate(func(key types.Type, _ interface{}) {
- keys = append(keys, key)
- })
- return keys
-}
-
-func (m *Map) toString(values bool) string {
- if m == nil {
- return "{}"
- }
- var buf bytes.Buffer
- fmt.Fprint(&buf, "{")
- sep := ""
- m.Iterate(func(key types.Type, value interface{}) {
- fmt.Fprint(&buf, sep)
- sep = ", "
- fmt.Fprint(&buf, key)
- if values {
- fmt.Fprintf(&buf, ": %q", value)
- }
- })
- fmt.Fprint(&buf, "}")
- return buf.String()
-}
-
-// String returns a string representation of the map's entries.
-// Values are printed using fmt.Sprintf("%v", v).
-// Order is unspecified.
-//
-func (m *Map) String() string {
- return m.toString(true)
-}
-
-// KeysString returns a string representation of the map's key set.
-// Order is unspecified.
-//
-func (m *Map) KeysString() string {
- return m.toString(false)
-}
-
-////////////////////////////////////////////////////////////////////////
-// Hasher
-
-// A Hasher maps each type to its hash value.
-// For efficiency, a hasher uses memoization; thus its memory
-// footprint grows monotonically over time.
-// Hashers are not thread-safe.
-// Hashers have reference semantics.
-// Call MakeHasher to create a Hasher.
-type Hasher struct {
- memo map[types.Type]uint32
-}
-
-// MakeHasher returns a new Hasher instance.
-func MakeHasher() Hasher {
- return Hasher{make(map[types.Type]uint32)}
-}
-
-// Hash computes a hash value for the given type t such that
-// Identical(t, t') => Hash(t) == Hash(t').
-func (h Hasher) Hash(t types.Type) uint32 {
- hash, ok := h.memo[t]
- if !ok {
- hash = h.hashFor(t)
- h.memo[t] = hash
- }
- return hash
-}
-
-// hashString computes the Fowler–Noll–Vo hash of s.
-func hashString(s string) uint32 {
- var h uint32
- for i := 0; i < len(s); i++ {
- h ^= uint32(s[i])
- h *= 16777619
- }
- return h
-}
-
-// hashFor computes the hash of t.
-func (h Hasher) hashFor(t types.Type) uint32 {
- // See Identical for rationale.
- switch t := t.(type) {
- case *types.Basic:
- return uint32(t.Kind())
-
- case *types.Array:
- return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
-
- case *types.Slice:
- return 9049 + 2*h.Hash(t.Elem())
-
- case *types.Struct:
- var hash uint32 = 9059
- for i, n := 0, t.NumFields(); i < n; i++ {
- f := t.Field(i)
- if f.Anonymous() {
- hash += 8861
- }
- hash += hashString(t.Tag(i))
- hash += hashString(f.Name()) // (ignore f.Pkg)
- hash += h.Hash(f.Type())
- }
- return hash
-
- case *types.Pointer:
- return 9067 + 2*h.Hash(t.Elem())
-
- case *types.Signature:
- var hash uint32 = 9091
- if t.Variadic() {
- hash *= 8863
- }
- return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
-
- case *types.Interface:
- var hash uint32 = 9103
- for i, n := 0, t.NumMethods(); i < n; i++ {
- // See go/types.identicalMethods for rationale.
- // Method order is not significant.
- // Ignore m.Pkg().
- m := t.Method(i)
- hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
- }
- return hash
-
- case *types.Map:
- return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
-
- case *types.Chan:
- return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
-
- case *types.Named:
- // Not safe with a copying GC; objects may move.
- return uint32(reflect.ValueOf(t.Obj()).Pointer())
-
- case *types.Tuple:
- return h.hashTuple(t)
- }
- panic(t)
-}
-
-func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
- // See go/types.identicalTypes for rationale.
- n := tuple.Len()
- var hash uint32 = 9137 + 2*uint32(n)
- for i := 0; i < n; i++ {
- hash += 3 * h.Hash(tuple.At(i).Type())
- }
- return hash
-}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go b/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go
deleted file mode 100644
index 32084610f49..00000000000
--- a/vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file implements a cache of method sets.
-
-package typeutil
-
-import (
- "go/types"
- "sync"
-)
-
-// A MethodSetCache records the method set of each type T for which
-// MethodSet(T) is called so that repeat queries are fast.
-// The zero value is a ready-to-use cache instance.
-type MethodSetCache struct {
- mu sync.Mutex
- named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
- others map[types.Type]*types.MethodSet // all other types
-}
-
-// MethodSet returns the method set of type T. It is thread-safe.
-//
-// If cache is nil, this function is equivalent to types.NewMethodSet(T).
-// Utility functions can thus expose an optional *MethodSetCache
-// parameter to clients that care about performance.
-//
-func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
- if cache == nil {
- return types.NewMethodSet(T)
- }
- cache.mu.Lock()
- defer cache.mu.Unlock()
-
- switch T := T.(type) {
- case *types.Named:
- return cache.lookupNamed(T).value
-
- case *types.Pointer:
- if N, ok := T.Elem().(*types.Named); ok {
- return cache.lookupNamed(N).pointer
- }
- }
-
- // all other types
- // (The map uses pointer equivalence, not type identity.)
- mset := cache.others[T]
- if mset == nil {
- mset = types.NewMethodSet(T)
- if cache.others == nil {
- cache.others = make(map[types.Type]*types.MethodSet)
- }
- cache.others[T] = mset
- }
- return mset
-}
-
-func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
- if cache.named == nil {
- cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
- }
- // Avoid recomputing mset(*T) for each distinct Pointer
- // instance whose underlying type is a named type.
- msets, ok := cache.named[named]
- if !ok {
- msets.value = types.NewMethodSet(named)
- msets.pointer = types.NewMethodSet(types.NewPointer(named))
- cache.named[named] = msets
- }
- return msets
-}
diff --git a/vendor/honnef.co/go/tools/go/types/typeutil/ui.go b/vendor/honnef.co/go/tools/go/types/typeutil/ui.go
deleted file mode 100644
index 9849c24cef3..00000000000
--- a/vendor/honnef.co/go/tools/go/types/typeutil/ui.go
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2014 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package typeutil
-
-// This file defines utilities for user interfaces that display types.
-
-import "go/types"
-
-// IntuitiveMethodSet returns the intuitive method set of a type T,
-// which is the set of methods you can call on an addressable value of
-// that type.
-//
-// The result always contains MethodSet(T), and is exactly MethodSet(T)
-// for interface types and for pointer-to-concrete types.
-// For all other concrete types T, the result additionally
-// contains each method belonging to *T if there is no identically
-// named method on T itself.
-//
-// This corresponds to user intuition about method sets;
-// this function is intended only for user interfaces.
-//
-// The order of the result is as for types.MethodSet(T).
-//
-func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
- isPointerToConcrete := func(T types.Type) bool {
- ptr, ok := T.(*types.Pointer)
- return ok && !types.IsInterface(ptr.Elem())
- }
-
- var result []*types.Selection
- mset := msets.MethodSet(T)
- if types.IsInterface(T) || isPointerToConcrete(T) {
- for i, n := 0, mset.Len(); i < n; i++ {
- result = append(result, mset.At(i))
- }
- } else {
- // T is some other concrete type.
- // Report methods of T and *T, preferring those of T.
- pmset := msets.MethodSet(types.NewPointer(T))
- for i, n := 0, pmset.Len(); i < n; i++ {
- meth := pmset.At(i)
- if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
- meth = m
- }
- result = append(result, meth)
- }
-
- }
- return result
-}
diff --git a/vendor/honnef.co/go/tools/internal/cache/cache.go b/vendor/honnef.co/go/tools/internal/cache/cache.go
deleted file mode 100644
index 6b41811cf25..00000000000
--- a/vendor/honnef.co/go/tools/internal/cache/cache.go
+++ /dev/null
@@ -1,496 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cache implements a build artifact cache.
-//
-// This package is a slightly modified fork of Go's
-// cmd/go/internal/cache package.
-package cache
-
-import (
- "bytes"
- "crypto/sha256"
- "encoding/hex"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "os"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-
- "honnef.co/go/tools/internal/renameio"
-)
-
-// An ActionID is a cache action key, the hash of a complete description of a
-// repeatable computation (command line, environment variables,
-// input file contents, executable contents).
-type ActionID [HashSize]byte
-
-// An OutputID is a cache output key, the hash of an output of a computation.
-type OutputID [HashSize]byte
-
-// A Cache is a package cache, backed by a file system directory tree.
-type Cache struct {
- dir string
- now func() time.Time
-}
-
-// Open opens and returns the cache in the given directory.
-//
-// It is safe for multiple processes on a single machine to use the
-// same cache directory in a local file system simultaneously.
-// They will coordinate using operating system file locks and may
-// duplicate effort but will not corrupt the cache.
-//
-// However, it is NOT safe for multiple processes on different machines
-// to share a cache directory (for example, if the directory were stored
-// in a network file system). File locking is notoriously unreliable in
-// network file systems and may not suffice to protect the cache.
-//
-func Open(dir string) (*Cache, error) {
- info, err := os.Stat(dir)
- if err != nil {
- return nil, err
- }
- if !info.IsDir() {
- return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")}
- }
- for i := 0; i < 256; i++ {
- name := filepath.Join(dir, fmt.Sprintf("%02x", i))
- if err := os.MkdirAll(name, 0777); err != nil {
- return nil, err
- }
- }
- c := &Cache{
- dir: dir,
- now: time.Now,
- }
- return c, nil
-}
-
-// fileName returns the name of the file corresponding to the given id.
-func (c *Cache) fileName(id [HashSize]byte, key string) string {
- return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
-}
-
-var errMissing = errors.New("cache entry not found")
-
-const (
- // action entry file is "v1 \n"
- hexSize = HashSize * 2
- entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
-)
-
-// verify controls whether to run the cache in verify mode.
-// In verify mode, the cache always returns errMissing from Get
-// but then double-checks in Put that the data being written
-// exactly matches any existing entry. This provides an easy
-// way to detect program behavior that would have been different
-// had the cache entry been returned from Get.
-//
-// verify is enabled by setting the environment variable
-// GODEBUG=gocacheverify=1.
-var verify = false
-
-// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
-var DebugTest = false
-
-func init() { initEnv() }
-
-func initEnv() {
- verify = false
- debugHash = false
- debug := strings.Split(os.Getenv("GODEBUG"), ",")
- for _, f := range debug {
- if f == "gocacheverify=1" {
- verify = true
- }
- if f == "gocachehash=1" {
- debugHash = true
- }
- if f == "gocachetest=1" {
- DebugTest = true
- }
- }
-}
-
-// Get looks up the action ID in the cache,
-// returning the corresponding output ID and file size, if any.
-// Note that finding an output ID does not guarantee that the
-// saved file for that output ID is still available.
-func (c *Cache) Get(id ActionID) (Entry, error) {
- if verify {
- return Entry{}, errMissing
- }
- return c.get(id)
-}
-
-type Entry struct {
- OutputID OutputID
- Size int64
- Time time.Time
-}
-
-// get is Get but does not respect verify mode, so that Put can use it.
-func (c *Cache) get(id ActionID) (Entry, error) {
- missing := func() (Entry, error) {
- return Entry{}, errMissing
- }
- f, err := os.Open(c.fileName(id, "a"))
- if err != nil {
- return missing()
- }
- defer f.Close()
- entry := make([]byte, entrySize+1) // +1 to detect whether f is too long
- if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF {
- return missing()
- }
- if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' {
- return missing()
- }
- eid, entry := entry[3:3+hexSize], entry[3+hexSize:]
- eout, entry := entry[1:1+hexSize], entry[1+hexSize:]
- esize, entry := entry[1:1+20], entry[1+20:]
- //lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465
- etime, entry := entry[1:1+20], entry[1+20:]
- var buf [HashSize]byte
- if _, err := hex.Decode(buf[:], eid); err != nil || buf != id {
- return missing()
- }
- if _, err := hex.Decode(buf[:], eout); err != nil {
- return missing()
- }
- i := 0
- for i < len(esize) && esize[i] == ' ' {
- i++
- }
- size, err := strconv.ParseInt(string(esize[i:]), 10, 64)
- if err != nil || size < 0 {
- return missing()
- }
- i = 0
- for i < len(etime) && etime[i] == ' ' {
- i++
- }
- tm, err := strconv.ParseInt(string(etime[i:]), 10, 64)
- if err != nil || tm < 0 {
- return missing()
- }
-
- c.used(c.fileName(id, "a"))
-
- return Entry{buf, size, time.Unix(0, tm)}, nil
-}
-
-// GetFile looks up the action ID in the cache and returns
-// the name of the corresponding data file.
-func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) {
- entry, err = c.Get(id)
- if err != nil {
- return "", Entry{}, err
- }
- file = c.OutputFile(entry.OutputID)
- info, err := os.Stat(file)
- if err != nil || info.Size() != entry.Size {
- return "", Entry{}, errMissing
- }
- return file, entry, nil
-}
-
-// GetBytes looks up the action ID in the cache and returns
-// the corresponding output bytes.
-// GetBytes should only be used for data that can be expected to fit in memory.
-func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) {
- entry, err := c.Get(id)
- if err != nil {
- return nil, entry, err
- }
- data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID))
- if sha256.Sum256(data) != entry.OutputID {
- return nil, entry, errMissing
- }
- return data, entry, nil
-}
-
-// OutputFile returns the name of the cache file storing output with the given OutputID.
-func (c *Cache) OutputFile(out OutputID) string {
- file := c.fileName(out, "d")
- c.used(file)
- return file
-}
-
-// Time constants for cache expiration.
-//
-// We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour),
-// to avoid causing many unnecessary inode updates. The mtimes therefore
-// roughly reflect "time of last use" but may in fact be older by at most an hour.
-//
-// We scan the cache for entries to delete at most once per trimInterval (1 day).
-//
-// When we do scan the cache, we delete entries that have not been used for
-// at least trimLimit (5 days). Statistics gathered from a month of usage by
-// Go developers found that essentially all reuse of cached entries happened
-// within 5 days of the previous reuse. See golang.org/issue/22990.
-const (
- mtimeInterval = 1 * time.Hour
- trimInterval = 24 * time.Hour
- trimLimit = 5 * 24 * time.Hour
-)
-
-// used makes a best-effort attempt to update mtime on file,
-// so that mtime reflects cache access time.
-//
-// Because the reflection only needs to be approximate,
-// and to reduce the amount of disk activity caused by using
-// cache entries, used only updates the mtime if the current
-// mtime is more than an hour old. This heuristic eliminates
-// nearly all of the mtime updates that would otherwise happen,
-// while still keeping the mtimes useful for cache trimming.
-func (c *Cache) used(file string) {
- info, err := os.Stat(file)
- if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval {
- return
- }
- os.Chtimes(file, c.now(), c.now())
-}
-
-// Trim removes old cache entries that are likely not to be reused.
-func (c *Cache) Trim() {
- now := c.now()
-
- // We maintain in dir/trim.txt the time of the last completed cache trim.
- // If the cache has been trimmed recently enough, do nothing.
- // This is the common case.
- data, _ := renameio.ReadFile(filepath.Join(c.dir, "trim.txt"))
- t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
- if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval {
- return
- }
-
- // Trim each of the 256 subdirectories.
- // We subtract an additional mtimeInterval
- // to account for the imprecision of our "last used" mtimes.
- cutoff := now.Add(-trimLimit - mtimeInterval)
- for i := 0; i < 256; i++ {
- subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
- c.trimSubdir(subdir, cutoff)
- }
-
- // Ignore errors from here: if we don't write the complete timestamp, the
- // cache will appear older than it is, and we'll trim it again next time.
- renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())), 0666)
-}
-
-// trimSubdir trims a single cache subdirectory.
-func (c *Cache) trimSubdir(subdir string, cutoff time.Time) {
- // Read all directory entries from subdir before removing
- // any files, in case removing files invalidates the file offset
- // in the directory scan. Also, ignore error from f.Readdirnames,
- // because we don't care about reporting the error and we still
- // want to process any entries found before the error.
- f, err := os.Open(subdir)
- if err != nil {
- return
- }
- names, _ := f.Readdirnames(-1)
- f.Close()
-
- for _, name := range names {
- // Remove only cache entries (xxxx-a and xxxx-d).
- if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") {
- continue
- }
- entry := filepath.Join(subdir, name)
- info, err := os.Stat(entry)
- if err == nil && info.ModTime().Before(cutoff) {
- os.Remove(entry)
- }
- }
-}
-
-// putIndexEntry adds an entry to the cache recording that executing the action
-// with the given id produces an output with the given output id (hash) and size.
-func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
- // Note: We expect that for one reason or another it may happen
- // that repeating an action produces a different output hash
- // (for example, if the output contains a time stamp or temp dir name).
- // While not ideal, this is also not a correctness problem, so we
- // don't make a big deal about it. In particular, we leave the action
- // cache entries writable specifically so that they can be overwritten.
- //
- // Setting GODEBUG=gocacheverify=1 does make a big deal:
- // in verify mode we are double-checking that the cache entries
- // are entirely reproducible. As just noted, this may be unrealistic
- // in some cases but the check is also useful for shaking out real bugs.
- entry := fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano())
-
- if verify && allowVerify {
- old, err := c.get(id)
- if err == nil && (old.OutputID != out || old.Size != size) {
- // panic to show stack trace, so we can see what code is generating this cache entry.
- msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size)
- panic(msg)
- }
- }
- file := c.fileName(id, "a")
-
- // Copy file to cache directory.
- mode := os.O_WRONLY | os.O_CREATE
- f, err := os.OpenFile(file, mode, 0666)
- if err != nil {
- return err
- }
- _, err = f.WriteString(entry)
- if err == nil {
- // Truncate the file only *after* writing it.
- // (This should be a no-op, but truncate just in case of previous corruption.)
- //
- // This differs from ioutil.WriteFile, which truncates to 0 *before* writing
- // via os.O_TRUNC. Truncating only after writing ensures that a second write
- // of the same content to the same file is idempotent, and does not — even
- // temporarily! — undo the effect of the first write.
- err = f.Truncate(int64(len(entry)))
- }
- if closeErr := f.Close(); err == nil {
- err = closeErr
- }
- if err != nil {
- // TODO(bcmills): This Remove potentially races with another go command writing to file.
- // Can we eliminate it?
- os.Remove(file)
- return err
- }
- os.Chtimes(file, c.now(), c.now()) // mainly for tests
-
- return nil
-}
-
-// Put stores the given output in the cache as the output for the action ID.
-// It may read file twice. The content of file must not change between the two passes.
-func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
- return c.put(id, file, true)
-}
-
-// PutNoVerify is like Put but disables the verify check
-// when GODEBUG=goverifycache=1 is set.
-// It is meant for data that is OK to cache but that we expect to vary slightly from run to run,
-// like test output containing times and the like.
-func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) {
- return c.put(id, file, false)
-}
-
-func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) {
- // Compute output ID.
- h := sha256.New()
- if _, err := file.Seek(0, 0); err != nil {
- return OutputID{}, 0, err
- }
- size, err := io.Copy(h, file)
- if err != nil {
- return OutputID{}, 0, err
- }
- var out OutputID
- h.Sum(out[:0])
-
- // Copy to cached output file (if not already present).
- if err := c.copyFile(file, out, size); err != nil {
- return out, size, err
- }
-
- // Add to cache index.
- return out, size, c.putIndexEntry(id, out, size, allowVerify)
-}
-
-// PutBytes stores the given bytes in the cache as the output for the action ID.
-func (c *Cache) PutBytes(id ActionID, data []byte) error {
- _, _, err := c.Put(id, bytes.NewReader(data))
- return err
-}
-
-// copyFile copies file into the cache, expecting it to have the given
-// output ID and size, if that file is not present already.
-func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
- name := c.fileName(out, "d")
- info, err := os.Stat(name)
- if err == nil && info.Size() == size {
- // Check hash.
- if f, err := os.Open(name); err == nil {
- h := sha256.New()
- io.Copy(h, f)
- f.Close()
- var out2 OutputID
- h.Sum(out2[:0])
- if out == out2 {
- return nil
- }
- }
- // Hash did not match. Fall through and rewrite file.
- }
-
- // Copy file to cache directory.
- mode := os.O_RDWR | os.O_CREATE
- if err == nil && info.Size() > size { // shouldn't happen but fix in case
- mode |= os.O_TRUNC
- }
- f, err := os.OpenFile(name, mode, 0666)
- if err != nil {
- return err
- }
- defer f.Close()
- if size == 0 {
- // File now exists with correct size.
- // Only one possible zero-length file, so contents are OK too.
- // Early return here makes sure there's a "last byte" for code below.
- return nil
- }
-
- // From here on, if any of the I/O writing the file fails,
- // we make a best-effort attempt to truncate the file f
- // before returning, to avoid leaving bad bytes in the file.
-
- // Copy file to f, but also into h to double-check hash.
- if _, err := file.Seek(0, 0); err != nil {
- f.Truncate(0)
- return err
- }
- h := sha256.New()
- w := io.MultiWriter(f, h)
- if _, err := io.CopyN(w, file, size-1); err != nil {
- f.Truncate(0)
- return err
- }
- // Check last byte before writing it; writing it will make the size match
- // what other processes expect to find and might cause them to start
- // using the file.
- buf := make([]byte, 1)
- if _, err := file.Read(buf); err != nil {
- f.Truncate(0)
- return err
- }
- h.Write(buf)
- sum := h.Sum(nil)
- if !bytes.Equal(sum, out[:]) {
- f.Truncate(0)
- return fmt.Errorf("file content changed underfoot")
- }
-
- // Commit cache file entry.
- if _, err := f.Write(buf); err != nil {
- f.Truncate(0)
- return err
- }
- if err := f.Close(); err != nil {
- // Data might not have been written,
- // but file may look like it is the right size.
- // To be extra careful, remove cached file.
- os.Remove(name)
- return err
- }
- os.Chtimes(name, c.now(), c.now()) // mainly for tests
-
- return nil
-}
diff --git a/vendor/honnef.co/go/tools/internal/cache/default.go b/vendor/honnef.co/go/tools/internal/cache/default.go
deleted file mode 100644
index 3034f76a538..00000000000
--- a/vendor/honnef.co/go/tools/internal/cache/default.go
+++ /dev/null
@@ -1,85 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "fmt"
- "io/ioutil"
- "log"
- "os"
- "path/filepath"
- "sync"
-)
-
-// Default returns the default cache to use.
-func Default() (*Cache, error) {
- defaultOnce.Do(initDefaultCache)
- return defaultCache, defaultDirErr
-}
-
-var (
- defaultOnce sync.Once
- defaultCache *Cache
-)
-
-// cacheREADME is a message stored in a README in the cache directory.
-// Because the cache lives outside the normal Go trees, we leave the
-// README as a courtesy to explain where it came from.
-const cacheREADME = `This directory holds cached build artifacts from staticcheck.
-`
-
-// initDefaultCache does the work of finding the default cache
-// the first time Default is called.
-func initDefaultCache() {
- dir := DefaultDir()
- if err := os.MkdirAll(dir, 0777); err != nil {
- log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
- }
- if _, err := os.Stat(filepath.Join(dir, "README")); err != nil {
- // Best effort.
- ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666)
- }
-
- c, err := Open(dir)
- if err != nil {
- log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err)
- }
- defaultCache = c
-}
-
-var (
- defaultDirOnce sync.Once
- defaultDir string
- defaultDirErr error
-)
-
-// DefaultDir returns the effective STATICCHECK_CACHE setting.
-func DefaultDir() string {
- // Save the result of the first call to DefaultDir for later use in
- // initDefaultCache. cmd/go/main.go explicitly sets GOCACHE so that
- // subprocesses will inherit it, but that means initDefaultCache can't
- // otherwise distinguish between an explicit "off" and a UserCacheDir error.
-
- defaultDirOnce.Do(func() {
- defaultDir = os.Getenv("STATICCHECK_CACHE")
- if filepath.IsAbs(defaultDir) {
- return
- }
- if defaultDir != "" {
- defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not an absolute path")
- return
- }
-
- // Compute default location.
- dir, err := os.UserCacheDir()
- if err != nil {
- defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not defined and %v", err)
- return
- }
- defaultDir = filepath.Join(dir, "staticcheck")
- })
-
- return defaultDir
-}
diff --git a/vendor/honnef.co/go/tools/internal/cache/hash.go b/vendor/honnef.co/go/tools/internal/cache/hash.go
deleted file mode 100644
index a53543ec501..00000000000
--- a/vendor/honnef.co/go/tools/internal/cache/hash.go
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2017 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package cache
-
-import (
- "bytes"
- "crypto/sha256"
- "fmt"
- "hash"
- "io"
- "os"
- "sync"
-)
-
-var debugHash = false // set when GODEBUG=gocachehash=1
-
-// HashSize is the number of bytes in a hash.
-const HashSize = 32
-
-// A Hash provides access to the canonical hash function used to index the cache.
-// The current implementation uses salted SHA256, but clients must not assume this.
-type Hash struct {
- h hash.Hash
- name string // for debugging
- buf *bytes.Buffer // for verify
-}
-
-// hashSalt is a salt string added to the beginning of every hash
-// created by NewHash. Using the Staticcheck version makes sure that different
-// versions of the command do not address the same cache
-// entries, so that a bug in one version does not affect the execution
-// of other versions. This salt will result in additional ActionID files
-// in the cache, but not additional copies of the large output files,
-// which are still addressed by unsalted SHA256.
-var hashSalt []byte
-
-func SetSalt(b []byte) {
- hashSalt = b
-}
-
-// Subkey returns an action ID corresponding to mixing a parent
-// action ID with a string description of the subkey.
-func Subkey(parent ActionID, desc string) ActionID {
- h := sha256.New()
- h.Write([]byte("subkey:"))
- h.Write(parent[:])
- h.Write([]byte(desc))
- var out ActionID
- h.Sum(out[:0])
- if debugHash {
- fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out)
- }
- if verify {
- hashDebug.Lock()
- hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc)
- hashDebug.Unlock()
- }
- return out
-}
-
-// NewHash returns a new Hash.
-// The caller is expected to Write data to it and then call Sum.
-func NewHash(name string) *Hash {
- h := &Hash{h: sha256.New(), name: name}
- if debugHash {
- fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name)
- }
- h.Write(hashSalt)
- if verify {
- h.buf = new(bytes.Buffer)
- }
- return h
-}
-
-// Write writes data to the running hash.
-func (h *Hash) Write(b []byte) (int, error) {
- if debugHash {
- fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b)
- }
- if h.buf != nil {
- h.buf.Write(b)
- }
- return h.h.Write(b)
-}
-
-// Sum returns the hash of the data written previously.
-func (h *Hash) Sum() [HashSize]byte {
- var out [HashSize]byte
- h.h.Sum(out[:0])
- if debugHash {
- fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out)
- }
- if h.buf != nil {
- hashDebug.Lock()
- if hashDebug.m == nil {
- hashDebug.m = make(map[[HashSize]byte]string)
- }
- hashDebug.m[out] = h.buf.String()
- hashDebug.Unlock()
- }
- return out
-}
-
-// In GODEBUG=gocacheverify=1 mode,
-// hashDebug holds the input to every computed hash ID,
-// so that we can work backward from the ID involved in a
-// cache entry mismatch to a description of what should be there.
-var hashDebug struct {
- sync.Mutex
- m map[[HashSize]byte]string
-}
-
-// reverseHash returns the input used to compute the hash id.
-func reverseHash(id [HashSize]byte) string {
- hashDebug.Lock()
- s := hashDebug.m[id]
- hashDebug.Unlock()
- return s
-}
-
-var hashFileCache struct {
- sync.Mutex
- m map[string][HashSize]byte
-}
-
-// FileHash returns the hash of the named file.
-// It caches repeated lookups for a given file,
-// and the cache entry for a file can be initialized
-// using SetFileHash.
-// The hash used by FileHash is not the same as
-// the hash used by NewHash.
-func FileHash(file string) ([HashSize]byte, error) {
- hashFileCache.Lock()
- out, ok := hashFileCache.m[file]
- hashFileCache.Unlock()
-
- if ok {
- return out, nil
- }
-
- h := sha256.New()
- f, err := os.Open(file)
- if err != nil {
- if debugHash {
- fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
- }
- return [HashSize]byte{}, err
- }
- _, err = io.Copy(h, f)
- f.Close()
- if err != nil {
- if debugHash {
- fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err)
- }
- return [HashSize]byte{}, err
- }
- h.Sum(out[:0])
- if debugHash {
- fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out)
- }
-
- SetFileHash(file, out)
- return out, nil
-}
-
-// SetFileHash sets the hash returned by FileHash for file.
-func SetFileHash(file string, sum [HashSize]byte) {
- hashFileCache.Lock()
- if hashFileCache.m == nil {
- hashFileCache.m = make(map[string][HashSize]byte)
- }
- hashFileCache.m[file] = sum
- hashFileCache.Unlock()
-}
diff --git a/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go b/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go
deleted file mode 100644
index 39469770248..00000000000
--- a/vendor/honnef.co/go/tools/internal/passes/buildir/buildir.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package buildir defines an Analyzer that constructs the IR
-// of an error-free package and returns the set of all
-// functions within it. It does not report any diagnostics itself but
-// may be used as an input to other analyzers.
-//
-// THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE.
-package buildir
-
-import (
- "go/ast"
- "go/types"
- "reflect"
-
- "golang.org/x/tools/go/analysis"
- "honnef.co/go/tools/ir"
-)
-
-type willExit struct{}
-type willUnwind struct{}
-
-func (*willExit) AFact() {}
-func (*willUnwind) AFact() {}
-
-var Analyzer = &analysis.Analyzer{
- Name: "buildir",
- Doc: "build IR for later passes",
- Run: run,
- ResultType: reflect.TypeOf(new(IR)),
- FactTypes: []analysis.Fact{new(willExit), new(willUnwind)},
-}
-
-// IR provides intermediate representation for all the
-// non-blank source functions in the current package.
-type IR struct {
- Pkg *ir.Package
- SrcFuncs []*ir.Function
-}
-
-func run(pass *analysis.Pass) (interface{}, error) {
- // Plundered from ssautil.BuildPackage.
-
- // We must create a new Program for each Package because the
- // analysis API provides no place to hang a Program shared by
- // all Packages. Consequently, IR Packages and Functions do not
- // have a canonical representation across an analysis session of
- // multiple packages. This is unlikely to be a problem in
- // practice because the analysis API essentially forces all
- // packages to be analysed independently, so any given call to
- // Analysis.Run on a package will see only IR objects belonging
- // to a single Program.
-
- mode := ir.GlobalDebug
-
- prog := ir.NewProgram(pass.Fset, mode)
-
- // Create IR packages for all imports.
- // Order is not significant.
- created := make(map[*types.Package]bool)
- var createAll func(pkgs []*types.Package)
- createAll = func(pkgs []*types.Package) {
- for _, p := range pkgs {
- if !created[p] {
- created[p] = true
- irpkg := prog.CreatePackage(p, nil, nil, true)
- for _, fn := range irpkg.Functions {
- if ast.IsExported(fn.Name()) {
- var exit willExit
- var unwind willUnwind
- if pass.ImportObjectFact(fn.Object(), &exit) {
- fn.WillExit = true
- }
- if pass.ImportObjectFact(fn.Object(), &unwind) {
- fn.WillUnwind = true
- }
- }
- }
- createAll(p.Imports())
- }
- }
- }
- createAll(pass.Pkg.Imports())
-
- // Create and build the primary package.
- irpkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false)
- irpkg.Build()
-
- // Compute list of source functions, including literals,
- // in source order.
- var addAnons func(f *ir.Function)
- funcs := make([]*ir.Function, len(irpkg.Functions))
- copy(funcs, irpkg.Functions)
- addAnons = func(f *ir.Function) {
- for _, anon := range f.AnonFuncs {
- funcs = append(funcs, anon)
- addAnons(anon)
- }
- }
- for _, fn := range irpkg.Functions {
- addAnons(fn)
- if fn.WillExit {
- pass.ExportObjectFact(fn.Object(), new(willExit))
- }
- if fn.WillUnwind {
- pass.ExportObjectFact(fn.Object(), new(willUnwind))
- }
- }
-
- return &IR{Pkg: irpkg, SrcFuncs: funcs}, nil
-}
diff --git a/vendor/honnef.co/go/tools/internal/renameio/renameio.go b/vendor/honnef.co/go/tools/internal/renameio/renameio.go
deleted file mode 100644
index a279d1a1eba..00000000000
--- a/vendor/honnef.co/go/tools/internal/renameio/renameio.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2018 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package renameio writes files atomically by renaming temporary files.
-package renameio
-
-import (
- "bytes"
- "io"
- "math/rand"
- "os"
- "path/filepath"
- "strconv"
-
- "honnef.co/go/tools/internal/robustio"
-)
-
-const patternSuffix = ".tmp"
-
-// Pattern returns a glob pattern that matches the unrenamed temporary files
-// created when writing to filename.
-func Pattern(filename string) string {
- return filepath.Join(filepath.Dir(filename), filepath.Base(filename)+patternSuffix)
-}
-
-// WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary
-// file in the same directory as filename, then renames it atomically to the
-// final name.
-//
-// That ensures that the final location, if it exists, is always a complete file.
-func WriteFile(filename string, data []byte, perm os.FileMode) (err error) {
- return WriteToFile(filename, bytes.NewReader(data), perm)
-}
-
-// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader
-// instead of a slice.
-func WriteToFile(filename string, data io.Reader, perm os.FileMode) (err error) {
- f, err := tempFile(filepath.Dir(filename), filepath.Base(filename), perm)
- if err != nil {
- return err
- }
- defer func() {
- // Only call os.Remove on f.Name() if we failed to rename it: otherwise,
- // some other process may have created a new file with the same name after
- // that.
- if err != nil {
- f.Close()
- os.Remove(f.Name())
- }
- }()
-
- if _, err := io.Copy(f, data); err != nil {
- return err
- }
- // Sync the file before renaming it: otherwise, after a crash the reader may
- // observe a 0-length file instead of the actual contents.
- // See https://golang.org/issue/22397#issuecomment-380831736.
- if err := f.Sync(); err != nil {
- return err
- }
- if err := f.Close(); err != nil {
- return err
- }
-
- return robustio.Rename(f.Name(), filename)
-}
-
-// tempFile creates a new temporary file with given permission bits.
-func tempFile(dir, prefix string, perm os.FileMode) (f *os.File, err error) {
- for i := 0; i < 10000; i++ {
- name := filepath.Join(dir, prefix+strconv.Itoa(rand.Intn(1000000000))+patternSuffix)
- f, err = os.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, perm)
- if os.IsExist(err) {
- continue
- }
- break
- }
- return
-}
-
-// ReadFile is like ioutil.ReadFile, but on Windows retries spurious errors that
-// may occur if the file is concurrently replaced.
-//
-// Errors are classified heuristically and retries are bounded, so even this
-// function may occasionally return a spurious error on Windows.
-// If so, the error will likely wrap one of:
-// - syscall.ERROR_ACCESS_DENIED
-// - syscall.ERROR_FILE_NOT_FOUND
-// - internal/syscall/windows.ERROR_SHARING_VIOLATION
-func ReadFile(filename string) ([]byte, error) {
- return robustio.ReadFile(filename)
-}
diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio.go b/vendor/honnef.co/go/tools/internal/robustio/robustio.go
deleted file mode 100644
index 76e47ad1ffa..00000000000
--- a/vendor/honnef.co/go/tools/internal/robustio/robustio.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package robustio wraps I/O functions that are prone to failure on Windows,
-// transparently retrying errors up to an arbitrary timeout.
-//
-// Errors are classified heuristically and retries are bounded, so the functions
-// in this package do not completely eliminate spurious errors. However, they do
-// significantly reduce the rate of failure in practice.
-//
-// If so, the error will likely wrap one of:
-// The functions in this package do not completely eliminate spurious errors,
-// but substantially reduce their rate of occurrence in practice.
-package robustio
-
-// Rename is like os.Rename, but on Windows retries errors that may occur if the
-// file is concurrently read or overwritten.
-//
-// (See golang.org/issue/31247 and golang.org/issue/32188.)
-func Rename(oldpath, newpath string) error {
- return rename(oldpath, newpath)
-}
-
-// ReadFile is like ioutil.ReadFile, but on Windows retries errors that may
-// occur if the file is concurrently replaced.
-//
-// (See golang.org/issue/31247 and golang.org/issue/32188.)
-func ReadFile(filename string) ([]byte, error) {
- return readFile(filename)
-}
-
-// RemoveAll is like os.RemoveAll, but on Windows retries errors that may occur
-// if an executable file in the directory has recently been executed.
-//
-// (See golang.org/issue/19491.)
-func RemoveAll(path string) error {
- return removeAll(path)
-}
-
-// IsEphemeralError reports whether err is one of the errors that the functions
-// in this package attempt to mitigate.
-//
-// Errors considered ephemeral include:
-// - syscall.ERROR_ACCESS_DENIED
-// - syscall.ERROR_FILE_NOT_FOUND
-// - internal/syscall/windows.ERROR_SHARING_VIOLATION
-//
-// This set may be expanded in the future; programs must not rely on the
-// non-ephemerality of any given error.
-func IsEphemeralError(err error) bool {
- return isEphemeralError(err)
-}
diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go
deleted file mode 100644
index 1ac0d10d7f1..00000000000
--- a/vendor/honnef.co/go/tools/internal/robustio/robustio_darwin.go
+++ /dev/null
@@ -1,29 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package robustio
-
-import (
- "os"
- "syscall"
-)
-
-const errFileNotFound = syscall.ENOENT
-
-// isEphemeralError returns true if err may be resolved by waiting.
-func isEphemeralError(err error) bool {
- switch werr := err.(type) {
- case *os.PathError:
- err = werr.Err
- case *os.LinkError:
- err = werr.Err
- case *os.SyscallError:
- err = werr.Err
-
- }
- if errno, ok := err.(syscall.Errno); ok {
- return errno == errFileNotFound
- }
- return false
-}
diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go
deleted file mode 100644
index e0bf5b9b3b9..00000000000
--- a/vendor/honnef.co/go/tools/internal/robustio/robustio_flaky.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build windows darwin
-
-package robustio
-
-import (
- "io/ioutil"
- "math/rand"
- "os"
- "syscall"
- "time"
-)
-
-const arbitraryTimeout = 500 * time.Millisecond
-
-const ERROR_SHARING_VIOLATION = 32
-
-// retry retries ephemeral errors from f up to an arbitrary timeout
-// to work around filesystem flakiness on Windows and Darwin.
-func retry(f func() (err error, mayRetry bool)) error {
- var (
- bestErr error
- lowestErrno syscall.Errno
- start time.Time
- nextSleep time.Duration = 1 * time.Millisecond
- )
- for {
- err, mayRetry := f()
- if err == nil || !mayRetry {
- return err
- }
-
- if errno, ok := err.(syscall.Errno); ok && (lowestErrno == 0 || errno < lowestErrno) {
- bestErr = err
- lowestErrno = errno
- } else if bestErr == nil {
- bestErr = err
- }
-
- if start.IsZero() {
- start = time.Now()
- } else if d := time.Since(start) + nextSleep; d >= arbitraryTimeout {
- break
- }
- time.Sleep(nextSleep)
- nextSleep += time.Duration(rand.Int63n(int64(nextSleep)))
- }
-
- return bestErr
-}
-
-// rename is like os.Rename, but retries ephemeral errors.
-//
-// On windows it wraps os.Rename, which (as of 2019-06-04) uses MoveFileEx with
-// MOVEFILE_REPLACE_EXISTING.
-//
-// Windows also provides a different system call, ReplaceFile,
-// that provides similar semantics, but perhaps preserves more metadata. (The
-// documentation on the differences between the two is very sparse.)
-//
-// Empirical error rates with MoveFileEx are lower under modest concurrency, so
-// for now we're sticking with what the os package already provides.
-func rename(oldpath, newpath string) (err error) {
- return retry(func() (err error, mayRetry bool) {
- err = os.Rename(oldpath, newpath)
- return err, isEphemeralError(err)
- })
-}
-
-// readFile is like ioutil.ReadFile, but retries ephemeral errors.
-func readFile(filename string) ([]byte, error) {
- var b []byte
- err := retry(func() (err error, mayRetry bool) {
- b, err = ioutil.ReadFile(filename)
-
- // Unlike in rename, we do not retry errFileNotFound here: it can occur
- // as a spurious error, but the file may also genuinely not exist, so the
- // increase in robustness is probably not worth the extra latency.
-
- return err, isEphemeralError(err) && err != errFileNotFound
- })
- return b, err
-}
-
-func removeAll(path string) error {
- return retry(func() (err error, mayRetry bool) {
- err = os.RemoveAll(path)
- return err, isEphemeralError(err)
- })
-}
diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_other.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_other.go
deleted file mode 100644
index a2428856f2e..00000000000
--- a/vendor/honnef.co/go/tools/internal/robustio/robustio_other.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//+build !windows,!darwin
-
-package robustio
-
-import (
- "io/ioutil"
- "os"
-)
-
-func rename(oldpath, newpath string) error {
- return os.Rename(oldpath, newpath)
-}
-
-func readFile(filename string) ([]byte, error) {
- return ioutil.ReadFile(filename)
-}
-
-func removeAll(path string) error {
- return os.RemoveAll(path)
-}
-
-func isEphemeralError(err error) bool {
- return false
-}
diff --git a/vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go b/vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go
deleted file mode 100644
index a35237d44ae..00000000000
--- a/vendor/honnef.co/go/tools/internal/robustio/robustio_windows.go
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package robustio
-
-import (
- "os"
- "syscall"
-)
-
-const errFileNotFound = syscall.ERROR_FILE_NOT_FOUND
-
-// isEphemeralError returns true if err may be resolved by waiting.
-func isEphemeralError(err error) bool {
- switch werr := err.(type) {
- case *os.PathError:
- err = werr.Err
- case *os.LinkError:
- err = werr.Err
- case *os.SyscallError:
- err = werr.Err
- }
- if errno, ok := err.(syscall.Errno); ok {
- switch errno {
- case syscall.ERROR_ACCESS_DENIED,
- syscall.ERROR_FILE_NOT_FOUND,
- ERROR_SHARING_VIOLATION:
- return true
- }
- }
- return false
-}
diff --git a/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go b/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go
deleted file mode 100644
index e9abf0d893e..00000000000
--- a/vendor/honnef.co/go/tools/internal/sharedcheck/lint.go
+++ /dev/null
@@ -1,71 +0,0 @@
-package sharedcheck
-
-import (
- "go/ast"
- "go/types"
-
- "golang.org/x/tools/go/analysis"
- "honnef.co/go/tools/code"
- "honnef.co/go/tools/internal/passes/buildir"
- "honnef.co/go/tools/ir"
- . "honnef.co/go/tools/lint/lintdsl"
-)
-
-func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- cb := func(node ast.Node) bool {
- rng, ok := node.(*ast.RangeStmt)
- if !ok || !code.IsBlank(rng.Key) {
- return true
- }
-
- v, _ := fn.ValueForExpr(rng.X)
-
- // Check that we're converting from string to []rune
- val, _ := v.(*ir.Convert)
- if val == nil {
- return true
- }
- Tsrc, ok := val.X.Type().(*types.Basic)
- if !ok || Tsrc.Kind() != types.String {
- return true
- }
- Tdst, ok := val.Type().(*types.Slice)
- if !ok {
- return true
- }
- TdstElem, ok := Tdst.Elem().(*types.Basic)
- if !ok || TdstElem.Kind() != types.Int32 {
- return true
- }
-
- // Check that the result of the conversion is only used to
- // range over
- refs := val.Referrers()
- if refs == nil {
- return true
- }
-
- // Expect two refs: one for obtaining the length of the slice,
- // one for accessing the elements
- if len(code.FilterDebug(*refs)) != 2 {
- // TODO(dh): right now, we check that only one place
- // refers to our slice. This will miss cases such as
- // ranging over the slice twice. Ideally, we'd ensure that
- // the slice is only used for ranging over (without
- // accessing the key), but that is harder to do because in
- // IR form, ranging over a slice looks like an ordinary
- // loop with index increments and slice accesses. We'd
- // have to look at the associated AST node to check that
- // it's a range statement.
- return true
- }
-
- pass.Reportf(rng.Pos(), "should range over string, not []rune(string)")
-
- return true
- }
- Inspect(fn.Source(), cb)
- }
- return nil, nil
-}
diff --git a/vendor/honnef.co/go/tools/ir/LICENSE b/vendor/honnef.co/go/tools/ir/LICENSE
deleted file mode 100644
index aee48041e11..00000000000
--- a/vendor/honnef.co/go/tools/ir/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-Copyright (c) 2016 Dominik Honnef. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/honnef.co/go/tools/ir/blockopt.go b/vendor/honnef.co/go/tools/ir/blockopt.go
deleted file mode 100644
index d7a0e35676a..00000000000
--- a/vendor/honnef.co/go/tools/ir/blockopt.go
+++ /dev/null
@@ -1,209 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// Simple block optimizations to simplify the control flow graph.
-
-// TODO(adonovan): opt: instead of creating several "unreachable" blocks
-// per function in the Builder, reuse a single one (e.g. at Blocks[1])
-// to reduce garbage.
-
-import (
- "fmt"
- "os"
-)
-
-// If true, perform sanity checking and show progress at each
-// successive iteration of optimizeBlocks. Very verbose.
-const debugBlockOpt = false
-
-// markReachable sets Index=-1 for all blocks reachable from b.
-func markReachable(b *BasicBlock) {
- b.gaps = -1
- for _, succ := range b.Succs {
- if succ.gaps == 0 {
- markReachable(succ)
- }
- }
-}
-
-// deleteUnreachableBlocks marks all reachable blocks of f and
-// eliminates (nils) all others, including possibly cyclic subgraphs.
-//
-func deleteUnreachableBlocks(f *Function) {
- const white, black = 0, -1
- // We borrow b.gaps temporarily as the mark bit.
- for _, b := range f.Blocks {
- b.gaps = white
- }
- markReachable(f.Blocks[0])
- // In SSI form, we need the exit to be reachable for correct
- // post-dominance information. In original form, however, we
- // cannot unconditionally mark it reachable because we won't
- // be adding fake edges, and this breaks the calculation of
- // dominance information.
- markReachable(f.Exit)
- for i, b := range f.Blocks {
- if b.gaps == white {
- for _, c := range b.Succs {
- if c.gaps == black {
- c.removePred(b) // delete white->black edge
- }
- }
- if debugBlockOpt {
- fmt.Fprintln(os.Stderr, "unreachable", b)
- }
- f.Blocks[i] = nil // delete b
- }
- }
- f.removeNilBlocks()
-}
-
-// jumpThreading attempts to apply simple jump-threading to block b,
-// in which a->b->c become a->c if b is just a Jump.
-// The result is true if the optimization was applied.
-//
-func jumpThreading(f *Function, b *BasicBlock) bool {
- if b.Index == 0 {
- return false // don't apply to entry block
- }
- if b.Instrs == nil {
- return false
- }
- for _, pred := range b.Preds {
- switch pred.Control().(type) {
- case *ConstantSwitch:
- // don't optimize away the head blocks of switch statements
- return false
- }
- }
- if _, ok := b.Instrs[0].(*Jump); !ok {
- return false // not just a jump
- }
- c := b.Succs[0]
- if c == b {
- return false // don't apply to degenerate jump-to-self.
- }
- if c.hasPhi() {
- return false // not sound without more effort
- }
- for j, a := range b.Preds {
- a.replaceSucc(b, c)
-
- // If a now has two edges to c, replace its degenerate If by Jump.
- if len(a.Succs) == 2 && a.Succs[0] == c && a.Succs[1] == c {
- jump := new(Jump)
- jump.setBlock(a)
- a.Instrs[len(a.Instrs)-1] = jump
- a.Succs = a.Succs[:1]
- c.removePred(b)
- } else {
- if j == 0 {
- c.replacePred(b, a)
- } else {
- c.Preds = append(c.Preds, a)
- }
- }
-
- if debugBlockOpt {
- fmt.Fprintln(os.Stderr, "jumpThreading", a, b, c)
- }
- }
- f.Blocks[b.Index] = nil // delete b
- return true
-}
-
-// fuseBlocks attempts to apply the block fusion optimization to block
-// a, in which a->b becomes ab if len(a.Succs)==len(b.Preds)==1.
-// The result is true if the optimization was applied.
-//
-func fuseBlocks(f *Function, a *BasicBlock) bool {
- if len(a.Succs) != 1 {
- return false
- }
- if a.Succs[0] == f.Exit {
- return false
- }
- b := a.Succs[0]
- if len(b.Preds) != 1 {
- return false
- }
- if _, ok := a.Instrs[len(a.Instrs)-1].(*Panic); ok {
- // panics aren't simple jumps, they have side effects.
- return false
- }
-
- // Degenerate &&/|| ops may result in a straight-line CFG
- // containing φ-nodes. (Ideally we'd replace such them with
- // their sole operand but that requires Referrers, built later.)
- if b.hasPhi() {
- return false // not sound without further effort
- }
-
- // Eliminate jump at end of A, then copy all of B across.
- a.Instrs = append(a.Instrs[:len(a.Instrs)-1], b.Instrs...)
- for _, instr := range b.Instrs {
- instr.setBlock(a)
- }
-
- // A inherits B's successors
- a.Succs = append(a.succs2[:0], b.Succs...)
-
- // Fix up Preds links of all successors of B.
- for _, c := range b.Succs {
- c.replacePred(b, a)
- }
-
- if debugBlockOpt {
- fmt.Fprintln(os.Stderr, "fuseBlocks", a, b)
- }
-
- f.Blocks[b.Index] = nil // delete b
- return true
-}
-
-// optimizeBlocks() performs some simple block optimizations on a
-// completed function: dead block elimination, block fusion, jump
-// threading.
-//
-func optimizeBlocks(f *Function) {
- if debugBlockOpt {
- f.WriteTo(os.Stderr)
- mustSanityCheck(f, nil)
- }
-
- deleteUnreachableBlocks(f)
-
- // Loop until no further progress.
- changed := true
- for changed {
- changed = false
-
- if debugBlockOpt {
- f.WriteTo(os.Stderr)
- mustSanityCheck(f, nil)
- }
-
- for _, b := range f.Blocks {
- // f.Blocks will temporarily contain nils to indicate
- // deleted blocks; we remove them at the end.
- if b == nil {
- continue
- }
-
- // Fuse blocks. b->c becomes bc.
- if fuseBlocks(f, b) {
- changed = true
- }
-
- // a->b->c becomes a->c if b contains only a Jump.
- if jumpThreading(f, b) {
- changed = true
- continue // (b was disconnected)
- }
- }
- }
- f.removeNilBlocks()
-}
diff --git a/vendor/honnef.co/go/tools/ir/builder.go b/vendor/honnef.co/go/tools/ir/builder.go
deleted file mode 100644
index fdf4cb1a91a..00000000000
--- a/vendor/honnef.co/go/tools/ir/builder.go
+++ /dev/null
@@ -1,2474 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file implements the BUILD phase of IR construction.
-//
-// IR construction has two phases, CREATE and BUILD. In the CREATE phase
-// (create.go), all packages are constructed and type-checked and
-// definitions of all package members are created, method-sets are
-// computed, and wrapper methods are synthesized.
-// ir.Packages are created in arbitrary order.
-//
-// In the BUILD phase (builder.go), the builder traverses the AST of
-// each Go source function and generates IR instructions for the
-// function body. Initializer expressions for package-level variables
-// are emitted to the package's init() function in the order specified
-// by go/types.Info.InitOrder, then code for each function in the
-// package is generated in lexical order.
-//
-// The builder's and Program's indices (maps) are populated and
-// mutated during the CREATE phase, but during the BUILD phase they
-// remain constant. The sole exception is Prog.methodSets and its
-// related maps, which are protected by a dedicated mutex.
-
-import (
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
- "os"
-)
-
-type opaqueType struct {
- types.Type
- name string
-}
-
-func (t *opaqueType) String() string { return t.name }
-
-var (
- varOk = newVar("ok", tBool)
- varIndex = newVar("index", tInt)
-
- // Type constants.
- tBool = types.Typ[types.Bool]
- tByte = types.Typ[types.Byte]
- tInt = types.Typ[types.Int]
- tInvalid = types.Typ[types.Invalid]
- tString = types.Typ[types.String]
- tUntypedNil = types.Typ[types.UntypedNil]
- tRangeIter = &opaqueType{nil, "iter"} // the type of all "range" iterators
- tEface = types.NewInterfaceType(nil, nil).Complete()
-)
-
-// builder holds state associated with the package currently being built.
-// Its methods contain all the logic for AST-to-IR conversion.
-type builder struct {
- printFunc string
-
- blocksets [5]BlockSet
-}
-
-// cond emits to fn code to evaluate boolean condition e and jump
-// to t or f depending on its value, performing various simplifications.
-//
-// Postcondition: fn.currentBlock is nil.
-//
-func (b *builder) cond(fn *Function, e ast.Expr, t, f *BasicBlock) *If {
- switch e := e.(type) {
- case *ast.ParenExpr:
- return b.cond(fn, e.X, t, f)
-
- case *ast.BinaryExpr:
- switch e.Op {
- case token.LAND:
- ltrue := fn.newBasicBlock("cond.true")
- b.cond(fn, e.X, ltrue, f)
- fn.currentBlock = ltrue
- return b.cond(fn, e.Y, t, f)
-
- case token.LOR:
- lfalse := fn.newBasicBlock("cond.false")
- b.cond(fn, e.X, t, lfalse)
- fn.currentBlock = lfalse
- return b.cond(fn, e.Y, t, f)
- }
-
- case *ast.UnaryExpr:
- if e.Op == token.NOT {
- return b.cond(fn, e.X, f, t)
- }
- }
-
- // A traditional compiler would simplify "if false" (etc) here
- // but we do not, for better fidelity to the source code.
- //
- // The value of a constant condition may be platform-specific,
- // and may cause blocks that are reachable in some configuration
- // to be hidden from subsequent analyses such as bug-finding tools.
- return emitIf(fn, b.expr(fn, e), t, f, e)
-}
-
-// logicalBinop emits code to fn to evaluate e, a &&- or
-// ||-expression whose reified boolean value is wanted.
-// The value is returned.
-//
-func (b *builder) logicalBinop(fn *Function, e *ast.BinaryExpr) Value {
- rhs := fn.newBasicBlock("binop.rhs")
- done := fn.newBasicBlock("binop.done")
-
- // T(e) = T(e.X) = T(e.Y) after untyped constants have been
- // eliminated.
- // TODO(adonovan): not true; MyBool==MyBool yields UntypedBool.
- t := fn.Pkg.typeOf(e)
-
- var short Value // value of the short-circuit path
- switch e.Op {
- case token.LAND:
- b.cond(fn, e.X, rhs, done)
- short = emitConst(fn, NewConst(constant.MakeBool(false), t))
-
- case token.LOR:
- b.cond(fn, e.X, done, rhs)
- short = emitConst(fn, NewConst(constant.MakeBool(true), t))
- }
-
- // Is rhs unreachable?
- if rhs.Preds == nil {
- // Simplify false&&y to false, true||y to true.
- fn.currentBlock = done
- return short
- }
-
- // Is done unreachable?
- if done.Preds == nil {
- // Simplify true&&y (or false||y) to y.
- fn.currentBlock = rhs
- return b.expr(fn, e.Y)
- }
-
- // All edges from e.X to done carry the short-circuit value.
- var edges []Value
- for range done.Preds {
- edges = append(edges, short)
- }
-
- // The edge from e.Y to done carries the value of e.Y.
- fn.currentBlock = rhs
- edges = append(edges, b.expr(fn, e.Y))
- emitJump(fn, done, e)
- fn.currentBlock = done
-
- phi := &Phi{Edges: edges}
- phi.typ = t
- return done.emit(phi, e)
-}
-
-// exprN lowers a multi-result expression e to IR form, emitting code
-// to fn and returning a single Value whose type is a *types.Tuple.
-// The caller must access the components via Extract.
-//
-// Multi-result expressions include CallExprs in a multi-value
-// assignment or return statement, and "value,ok" uses of
-// TypeAssertExpr, IndexExpr (when X is a map), and Recv.
-//
-func (b *builder) exprN(fn *Function, e ast.Expr) Value {
- typ := fn.Pkg.typeOf(e).(*types.Tuple)
- switch e := e.(type) {
- case *ast.ParenExpr:
- return b.exprN(fn, e.X)
-
- case *ast.CallExpr:
- // Currently, no built-in function nor type conversion
- // has multiple results, so we can avoid some of the
- // cases for single-valued CallExpr.
- var c Call
- b.setCall(fn, e, &c.Call)
- c.typ = typ
- return fn.emit(&c, e)
-
- case *ast.IndexExpr:
- mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map)
- lookup := &MapLookup{
- X: b.expr(fn, e.X),
- Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key(), e),
- CommaOk: true,
- }
- lookup.setType(typ)
- return fn.emit(lookup, e)
-
- case *ast.TypeAssertExpr:
- return emitTypeTest(fn, b.expr(fn, e.X), typ.At(0).Type(), e)
-
- case *ast.UnaryExpr: // must be receive <-
- return emitRecv(fn, b.expr(fn, e.X), true, typ, e)
- }
- panic(fmt.Sprintf("exprN(%T) in %s", e, fn))
-}
-
-// builtin emits to fn IR instructions to implement a call to the
-// built-in function obj with the specified arguments
-// and return type. It returns the value defined by the result.
-//
-// The result is nil if no special handling was required; in this case
-// the caller should treat this like an ordinary library function
-// call.
-//
-func (b *builder) builtin(fn *Function, obj *types.Builtin, args []ast.Expr, typ types.Type, source ast.Node) Value {
- switch obj.Name() {
- case "make":
- switch typ.Underlying().(type) {
- case *types.Slice:
- n := b.expr(fn, args[1])
- m := n
- if len(args) == 3 {
- m = b.expr(fn, args[2])
- }
- if m, ok := m.(*Const); ok {
- // treat make([]T, n, m) as new([m]T)[:n]
- cap := m.Int64()
- at := types.NewArray(typ.Underlying().(*types.Slice).Elem(), cap)
- alloc := emitNew(fn, at, source)
- v := &Slice{
- X: alloc,
- High: n,
- }
- v.setType(typ)
- return fn.emit(v, source)
- }
- v := &MakeSlice{
- Len: n,
- Cap: m,
- }
- v.setType(typ)
- return fn.emit(v, source)
-
- case *types.Map:
- var res Value
- if len(args) == 2 {
- res = b.expr(fn, args[1])
- }
- v := &MakeMap{Reserve: res}
- v.setType(typ)
- return fn.emit(v, source)
-
- case *types.Chan:
- var sz Value = emitConst(fn, intConst(0))
- if len(args) == 2 {
- sz = b.expr(fn, args[1])
- }
- v := &MakeChan{Size: sz}
- v.setType(typ)
- return fn.emit(v, source)
- }
-
- case "new":
- alloc := emitNew(fn, deref(typ), source)
- return alloc
-
- case "len", "cap":
- // Special case: len or cap of an array or *array is
- // based on the type, not the value which may be nil.
- // We must still evaluate the value, though. (If it
- // was side-effect free, the whole call would have
- // been constant-folded.)
- t := deref(fn.Pkg.typeOf(args[0])).Underlying()
- if at, ok := t.(*types.Array); ok {
- b.expr(fn, args[0]) // for effects only
- return emitConst(fn, intConst(at.Len()))
- }
- // Otherwise treat as normal.
-
- case "panic":
- fn.emit(&Panic{
- X: emitConv(fn, b.expr(fn, args[0]), tEface, source),
- }, source)
- addEdge(fn.currentBlock, fn.Exit)
- fn.currentBlock = fn.newBasicBlock("unreachable")
- return emitConst(fn, NewConst(constant.MakeBool(true), tBool)) // any non-nil Value will do
- }
- return nil // treat all others as a regular function call
-}
-
-// addr lowers a single-result addressable expression e to IR form,
-// emitting code to fn and returning the location (an lvalue) defined
-// by the expression.
-//
-// If escaping is true, addr marks the base variable of the
-// addressable expression e as being a potentially escaping pointer
-// value. For example, in this code:
-//
-// a := A{
-// b: [1]B{B{c: 1}}
-// }
-// return &a.b[0].c
-//
-// the application of & causes a.b[0].c to have its address taken,
-// which means that ultimately the local variable a must be
-// heap-allocated. This is a simple but very conservative escape
-// analysis.
-//
-// Operations forming potentially escaping pointers include:
-// - &x, including when implicit in method call or composite literals.
-// - a[:] iff a is an array (not *array)
-// - references to variables in lexically enclosing functions.
-//
-func (b *builder) addr(fn *Function, e ast.Expr, escaping bool) lvalue {
- switch e := e.(type) {
- case *ast.Ident:
- if isBlankIdent(e) {
- return blank{}
- }
- obj := fn.Pkg.objectOf(e)
- v := fn.Prog.packageLevelValue(obj) // var (address)
- if v == nil {
- v = fn.lookup(obj, escaping)
- }
- return &address{addr: v, expr: e}
-
- case *ast.CompositeLit:
- t := deref(fn.Pkg.typeOf(e))
- var v *Alloc
- if escaping {
- v = emitNew(fn, t, e)
- } else {
- v = fn.addLocal(t, e)
- }
- var sb storebuf
- b.compLit(fn, v, e, true, &sb)
- sb.emit(fn)
- return &address{addr: v, expr: e}
-
- case *ast.ParenExpr:
- return b.addr(fn, e.X, escaping)
-
- case *ast.SelectorExpr:
- sel, ok := fn.Pkg.info.Selections[e]
- if !ok {
- // qualified identifier
- return b.addr(fn, e.Sel, escaping)
- }
- if sel.Kind() != types.FieldVal {
- panic(sel)
- }
- wantAddr := true
- v := b.receiver(fn, e.X, wantAddr, escaping, sel, e)
- last := len(sel.Index()) - 1
- return &address{
- addr: emitFieldSelection(fn, v, sel.Index()[last], true, e.Sel),
- expr: e.Sel,
- }
-
- case *ast.IndexExpr:
- var x Value
- var et types.Type
- switch t := fn.Pkg.typeOf(e.X).Underlying().(type) {
- case *types.Array:
- x = b.addr(fn, e.X, escaping).address(fn)
- et = types.NewPointer(t.Elem())
- case *types.Pointer: // *array
- x = b.expr(fn, e.X)
- et = types.NewPointer(t.Elem().Underlying().(*types.Array).Elem())
- case *types.Slice:
- x = b.expr(fn, e.X)
- et = types.NewPointer(t.Elem())
- case *types.Map:
- return &element{
- m: b.expr(fn, e.X),
- k: emitConv(fn, b.expr(fn, e.Index), t.Key(), e.Index),
- t: t.Elem(),
- }
- default:
- panic("unexpected container type in IndexExpr: " + t.String())
- }
- v := &IndexAddr{
- X: x,
- Index: emitConv(fn, b.expr(fn, e.Index), tInt, e.Index),
- }
- v.setType(et)
- return &address{addr: fn.emit(v, e), expr: e}
-
- case *ast.StarExpr:
- return &address{addr: b.expr(fn, e.X), expr: e}
- }
-
- panic(fmt.Sprintf("unexpected address expression: %T", e))
-}
-
-type store struct {
- lhs lvalue
- rhs Value
- source ast.Node
-}
-
-type storebuf struct{ stores []store }
-
-func (sb *storebuf) store(lhs lvalue, rhs Value, source ast.Node) {
- sb.stores = append(sb.stores, store{lhs, rhs, source})
-}
-
-func (sb *storebuf) emit(fn *Function) {
- for _, s := range sb.stores {
- s.lhs.store(fn, s.rhs, s.source)
- }
-}
-
-// assign emits to fn code to initialize the lvalue loc with the value
-// of expression e. If isZero is true, assign assumes that loc holds
-// the zero value for its type.
-//
-// This is equivalent to loc.store(fn, b.expr(fn, e)), but may generate
-// better code in some cases, e.g., for composite literals in an
-// addressable location.
-//
-// If sb is not nil, assign generates code to evaluate expression e, but
-// not to update loc. Instead, the necessary stores are appended to the
-// storebuf sb so that they can be executed later. This allows correct
-// in-place update of existing variables when the RHS is a composite
-// literal that may reference parts of the LHS.
-//
-func (b *builder) assign(fn *Function, loc lvalue, e ast.Expr, isZero bool, sb *storebuf, source ast.Node) {
- // Can we initialize it in place?
- if e, ok := unparen(e).(*ast.CompositeLit); ok {
- // A CompositeLit never evaluates to a pointer,
- // so if the type of the location is a pointer,
- // an &-operation is implied.
- if _, ok := loc.(blank); !ok { // avoid calling blank.typ()
- if isPointer(loc.typ()) {
- ptr := b.addr(fn, e, true).address(fn)
- // copy address
- if sb != nil {
- sb.store(loc, ptr, source)
- } else {
- loc.store(fn, ptr, source)
- }
- return
- }
- }
-
- if _, ok := loc.(*address); ok {
- if isInterface(loc.typ()) {
- // e.g. var x interface{} = T{...}
- // Can't in-place initialize an interface value.
- // Fall back to copying.
- } else {
- // x = T{...} or x := T{...}
- addr := loc.address(fn)
- if sb != nil {
- b.compLit(fn, addr, e, isZero, sb)
- } else {
- var sb storebuf
- b.compLit(fn, addr, e, isZero, &sb)
- sb.emit(fn)
- }
-
- // Subtle: emit debug ref for aggregate types only;
- // slice and map are handled by store ops in compLit.
- switch loc.typ().Underlying().(type) {
- case *types.Struct, *types.Array:
- emitDebugRef(fn, e, addr, true)
- }
-
- return
- }
- }
- }
-
- // simple case: just copy
- rhs := b.expr(fn, e)
- if sb != nil {
- sb.store(loc, rhs, source)
- } else {
- loc.store(fn, rhs, source)
- }
-}
-
-// expr lowers a single-result expression e to IR form, emitting code
-// to fn and returning the Value defined by the expression.
-//
-func (b *builder) expr(fn *Function, e ast.Expr) Value {
- e = unparen(e)
-
- tv := fn.Pkg.info.Types[e]
-
- // Is expression a constant?
- if tv.Value != nil {
- return emitConst(fn, NewConst(tv.Value, tv.Type))
- }
-
- var v Value
- if tv.Addressable() {
- // Prefer pointer arithmetic ({Index,Field}Addr) followed
- // by Load over subelement extraction (e.g. Index, Field),
- // to avoid large copies.
- v = b.addr(fn, e, false).load(fn, e)
- } else {
- v = b.expr0(fn, e, tv)
- }
- if fn.debugInfo() {
- emitDebugRef(fn, e, v, false)
- }
- return v
-}
-
-func (b *builder) expr0(fn *Function, e ast.Expr, tv types.TypeAndValue) Value {
- switch e := e.(type) {
- case *ast.BasicLit:
- panic("non-constant BasicLit") // unreachable
-
- case *ast.FuncLit:
- fn2 := &Function{
- name: fmt.Sprintf("%s$%d", fn.Name(), 1+len(fn.AnonFuncs)),
- Signature: fn.Pkg.typeOf(e.Type).Underlying().(*types.Signature),
- parent: fn,
- Pkg: fn.Pkg,
- Prog: fn.Prog,
- functionBody: new(functionBody),
- }
- fn2.source = e
- fn.AnonFuncs = append(fn.AnonFuncs, fn2)
- fn2.initHTML(b.printFunc)
- b.buildFunction(fn2)
- if fn2.FreeVars == nil {
- return fn2
- }
- v := &MakeClosure{Fn: fn2}
- v.setType(tv.Type)
- for _, fv := range fn2.FreeVars {
- v.Bindings = append(v.Bindings, fv.outer)
- fv.outer = nil
- }
- return fn.emit(v, e)
-
- case *ast.TypeAssertExpr: // single-result form only
- return emitTypeAssert(fn, b.expr(fn, e.X), tv.Type, e)
-
- case *ast.CallExpr:
- if fn.Pkg.info.Types[e.Fun].IsType() {
- // Explicit type conversion, e.g. string(x) or big.Int(x)
- x := b.expr(fn, e.Args[0])
- y := emitConv(fn, x, tv.Type, e)
- return y
- }
- // Call to "intrinsic" built-ins, e.g. new, make, panic.
- if id, ok := unparen(e.Fun).(*ast.Ident); ok {
- if obj, ok := fn.Pkg.info.Uses[id].(*types.Builtin); ok {
- if v := b.builtin(fn, obj, e.Args, tv.Type, e); v != nil {
- return v
- }
- }
- }
- // Regular function call.
- var v Call
- b.setCall(fn, e, &v.Call)
- v.setType(tv.Type)
- return fn.emit(&v, e)
-
- case *ast.UnaryExpr:
- switch e.Op {
- case token.AND: // &X --- potentially escaping.
- addr := b.addr(fn, e.X, true)
- if _, ok := unparen(e.X).(*ast.StarExpr); ok {
- // &*p must panic if p is nil (http://golang.org/s/go12nil).
- // For simplicity, we'll just (suboptimally) rely
- // on the side effects of a load.
- // TODO(adonovan): emit dedicated nilcheck.
- addr.load(fn, e)
- }
- return addr.address(fn)
- case token.ADD:
- return b.expr(fn, e.X)
- case token.NOT, token.SUB, token.XOR: // ! <- - ^
- v := &UnOp{
- Op: e.Op,
- X: b.expr(fn, e.X),
- }
- v.setType(tv.Type)
- return fn.emit(v, e)
- case token.ARROW:
- return emitRecv(fn, b.expr(fn, e.X), false, tv.Type, e)
- default:
- panic(e.Op)
- }
-
- case *ast.BinaryExpr:
- switch e.Op {
- case token.LAND, token.LOR:
- return b.logicalBinop(fn, e)
- case token.SHL, token.SHR:
- fallthrough
- case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
- return emitArith(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), tv.Type, e)
-
- case token.EQL, token.NEQ, token.GTR, token.LSS, token.LEQ, token.GEQ:
- cmp := emitCompare(fn, e.Op, b.expr(fn, e.X), b.expr(fn, e.Y), e)
- // The type of x==y may be UntypedBool.
- return emitConv(fn, cmp, types.Default(tv.Type), e)
- default:
- panic("illegal op in BinaryExpr: " + e.Op.String())
- }
-
- case *ast.SliceExpr:
- var low, high, max Value
- var x Value
- switch fn.Pkg.typeOf(e.X).Underlying().(type) {
- case *types.Array:
- // Potentially escaping.
- x = b.addr(fn, e.X, true).address(fn)
- case *types.Basic, *types.Slice, *types.Pointer: // *array
- x = b.expr(fn, e.X)
- default:
- panic("unreachable")
- }
- if e.High != nil {
- high = b.expr(fn, e.High)
- }
- if e.Low != nil {
- low = b.expr(fn, e.Low)
- }
- if e.Slice3 {
- max = b.expr(fn, e.Max)
- }
- v := &Slice{
- X: x,
- Low: low,
- High: high,
- Max: max,
- }
- v.setType(tv.Type)
- return fn.emit(v, e)
-
- case *ast.Ident:
- obj := fn.Pkg.info.Uses[e]
- // Universal built-in or nil?
- switch obj := obj.(type) {
- case *types.Builtin:
- return &Builtin{name: obj.Name(), sig: tv.Type.(*types.Signature)}
- case *types.Nil:
- return emitConst(fn, nilConst(tv.Type))
- }
- // Package-level func or var?
- if v := fn.Prog.packageLevelValue(obj); v != nil {
- if _, ok := obj.(*types.Var); ok {
- return emitLoad(fn, v, e) // var (address)
- }
- return v // (func)
- }
- // Local var.
- return emitLoad(fn, fn.lookup(obj, false), e) // var (address)
-
- case *ast.SelectorExpr:
- sel, ok := fn.Pkg.info.Selections[e]
- if !ok {
- // qualified identifier
- return b.expr(fn, e.Sel)
- }
- switch sel.Kind() {
- case types.MethodExpr:
- // (*T).f or T.f, the method f from the method-set of type T.
- // The result is a "thunk".
- return emitConv(fn, makeThunk(fn.Prog, sel), tv.Type, e)
-
- case types.MethodVal:
- // e.f where e is an expression and f is a method.
- // The result is a "bound".
- obj := sel.Obj().(*types.Func)
- rt := recvType(obj)
- wantAddr := isPointer(rt)
- escaping := true
- v := b.receiver(fn, e.X, wantAddr, escaping, sel, e)
- if isInterface(rt) {
- // If v has interface type I,
- // we must emit a check that v is non-nil.
- // We use: typeassert v.(I).
- emitTypeAssert(fn, v, rt, e)
- }
- c := &MakeClosure{
- Fn: makeBound(fn.Prog, obj),
- Bindings: []Value{v},
- }
- c.source = e.Sel
- c.setType(tv.Type)
- return fn.emit(c, e)
-
- case types.FieldVal:
- indices := sel.Index()
- last := len(indices) - 1
- v := b.expr(fn, e.X)
- v = emitImplicitSelections(fn, v, indices[:last], e)
- v = emitFieldSelection(fn, v, indices[last], false, e.Sel)
- return v
- }
-
- panic("unexpected expression-relative selector")
-
- case *ast.IndexExpr:
- switch t := fn.Pkg.typeOf(e.X).Underlying().(type) {
- case *types.Array:
- // Non-addressable array (in a register).
- v := &Index{
- X: b.expr(fn, e.X),
- Index: emitConv(fn, b.expr(fn, e.Index), tInt, e.Index),
- }
- v.setType(t.Elem())
- return fn.emit(v, e)
-
- case *types.Map:
- // Maps are not addressable.
- mapt := fn.Pkg.typeOf(e.X).Underlying().(*types.Map)
- v := &MapLookup{
- X: b.expr(fn, e.X),
- Index: emitConv(fn, b.expr(fn, e.Index), mapt.Key(), e.Index),
- }
- v.setType(mapt.Elem())
- return fn.emit(v, e)
-
- case *types.Basic: // => string
- // Strings are not addressable.
- v := &StringLookup{
- X: b.expr(fn, e.X),
- Index: b.expr(fn, e.Index),
- }
- v.setType(tByte)
- return fn.emit(v, e)
-
- case *types.Slice, *types.Pointer: // *array
- // Addressable slice/array; use IndexAddr and Load.
- return b.addr(fn, e, false).load(fn, e)
-
- default:
- panic("unexpected container type in IndexExpr: " + t.String())
- }
-
- case *ast.CompositeLit, *ast.StarExpr:
- // Addressable types (lvalues)
- return b.addr(fn, e, false).load(fn, e)
- }
-
- panic(fmt.Sprintf("unexpected expr: %T", e))
-}
-
-// stmtList emits to fn code for all statements in list.
-func (b *builder) stmtList(fn *Function, list []ast.Stmt) {
- for _, s := range list {
- b.stmt(fn, s)
- }
-}
-
-// receiver emits to fn code for expression e in the "receiver"
-// position of selection e.f (where f may be a field or a method) and
-// returns the effective receiver after applying the implicit field
-// selections of sel.
-//
-// wantAddr requests that the result is an an address. If
-// !sel.Indirect(), this may require that e be built in addr() mode; it
-// must thus be addressable.
-//
-// escaping is defined as per builder.addr().
-//
-func (b *builder) receiver(fn *Function, e ast.Expr, wantAddr, escaping bool, sel *types.Selection, source ast.Node) Value {
- var v Value
- if wantAddr && !sel.Indirect() && !isPointer(fn.Pkg.typeOf(e)) {
- v = b.addr(fn, e, escaping).address(fn)
- } else {
- v = b.expr(fn, e)
- }
-
- last := len(sel.Index()) - 1
- v = emitImplicitSelections(fn, v, sel.Index()[:last], source)
- if !wantAddr && isPointer(v.Type()) {
- v = emitLoad(fn, v, e)
- }
- return v
-}
-
-// setCallFunc populates the function parts of a CallCommon structure
-// (Func, Method, Recv, Args[0]) based on the kind of invocation
-// occurring in e.
-//
-func (b *builder) setCallFunc(fn *Function, e *ast.CallExpr, c *CallCommon) {
- // Is this a method call?
- if selector, ok := unparen(e.Fun).(*ast.SelectorExpr); ok {
- sel, ok := fn.Pkg.info.Selections[selector]
- if ok && sel.Kind() == types.MethodVal {
- obj := sel.Obj().(*types.Func)
- recv := recvType(obj)
- wantAddr := isPointer(recv)
- escaping := true
- v := b.receiver(fn, selector.X, wantAddr, escaping, sel, selector)
- if isInterface(recv) {
- // Invoke-mode call.
- c.Value = v
- c.Method = obj
- } else {
- // "Call"-mode call.
- c.Value = fn.Prog.declaredFunc(obj)
- c.Args = append(c.Args, v)
- }
- return
- }
-
- // sel.Kind()==MethodExpr indicates T.f() or (*T).f():
- // a statically dispatched call to the method f in the
- // method-set of T or *T. T may be an interface.
- //
- // e.Fun would evaluate to a concrete method, interface
- // wrapper function, or promotion wrapper.
- //
- // For now, we evaluate it in the usual way.
- //
- // TODO(adonovan): opt: inline expr() here, to make the
- // call static and to avoid generation of wrappers.
- // It's somewhat tricky as it may consume the first
- // actual parameter if the call is "invoke" mode.
- //
- // Examples:
- // type T struct{}; func (T) f() {} // "call" mode
- // type T interface { f() } // "invoke" mode
- //
- // type S struct{ T }
- //
- // var s S
- // S.f(s)
- // (*S).f(&s)
- //
- // Suggested approach:
- // - consume the first actual parameter expression
- // and build it with b.expr().
- // - apply implicit field selections.
- // - use MethodVal logic to populate fields of c.
- }
-
- // Evaluate the function operand in the usual way.
- c.Value = b.expr(fn, e.Fun)
-}
-
-// emitCallArgs emits to f code for the actual parameters of call e to
-// a (possibly built-in) function of effective type sig.
-// The argument values are appended to args, which is then returned.
-//
-func (b *builder) emitCallArgs(fn *Function, sig *types.Signature, e *ast.CallExpr, args []Value) []Value {
- // f(x, y, z...): pass slice z straight through.
- if e.Ellipsis != 0 {
- for i, arg := range e.Args {
- v := emitConv(fn, b.expr(fn, arg), sig.Params().At(i).Type(), arg)
- args = append(args, v)
- }
- return args
- }
-
- offset := len(args) // 1 if call has receiver, 0 otherwise
-
- // Evaluate actual parameter expressions.
- //
- // If this is a chained call of the form f(g()) where g has
- // multiple return values (MRV), they are flattened out into
- // args; a suffix of them may end up in a varargs slice.
- for _, arg := range e.Args {
- v := b.expr(fn, arg)
- if ttuple, ok := v.Type().(*types.Tuple); ok { // MRV chain
- for i, n := 0, ttuple.Len(); i < n; i++ {
- args = append(args, emitExtract(fn, v, i, arg))
- }
- } else {
- args = append(args, v)
- }
- }
-
- // Actual->formal assignability conversions for normal parameters.
- np := sig.Params().Len() // number of normal parameters
- if sig.Variadic() {
- np--
- }
- for i := 0; i < np; i++ {
- args[offset+i] = emitConv(fn, args[offset+i], sig.Params().At(i).Type(), args[offset+i].Source())
- }
-
- // Actual->formal assignability conversions for variadic parameter,
- // and construction of slice.
- if sig.Variadic() {
- varargs := args[offset+np:]
- st := sig.Params().At(np).Type().(*types.Slice)
- vt := st.Elem()
- if len(varargs) == 0 {
- args = append(args, emitConst(fn, nilConst(st)))
- } else {
- // Replace a suffix of args with a slice containing it.
- at := types.NewArray(vt, int64(len(varargs)))
- a := emitNew(fn, at, e)
- a.source = e
- for i, arg := range varargs {
- iaddr := &IndexAddr{
- X: a,
- Index: emitConst(fn, intConst(int64(i))),
- }
- iaddr.setType(types.NewPointer(vt))
- fn.emit(iaddr, e)
- emitStore(fn, iaddr, arg, arg.Source())
- }
- s := &Slice{X: a}
- s.setType(st)
- args[offset+np] = fn.emit(s, args[offset+np].Source())
- args = args[:offset+np+1]
- }
- }
- return args
-}
-
-// setCall emits to fn code to evaluate all the parameters of a function
-// call e, and populates *c with those values.
-//
-func (b *builder) setCall(fn *Function, e *ast.CallExpr, c *CallCommon) {
- // First deal with the f(...) part and optional receiver.
- b.setCallFunc(fn, e, c)
-
- // Then append the other actual parameters.
- sig, _ := fn.Pkg.typeOf(e.Fun).Underlying().(*types.Signature)
- if sig == nil {
- panic(fmt.Sprintf("no signature for call of %s", e.Fun))
- }
- c.Args = b.emitCallArgs(fn, sig, e, c.Args)
-}
-
-// assignOp emits to fn code to perform loc = val.
-func (b *builder) assignOp(fn *Function, loc lvalue, val Value, op token.Token, source ast.Node) {
- oldv := loc.load(fn, source)
- loc.store(fn, emitArith(fn, op, oldv, emitConv(fn, val, oldv.Type(), source), loc.typ(), source), source)
-}
-
-// localValueSpec emits to fn code to define all of the vars in the
-// function-local ValueSpec, spec.
-//
-func (b *builder) localValueSpec(fn *Function, spec *ast.ValueSpec) {
- switch {
- case len(spec.Values) == len(spec.Names):
- // e.g. var x, y = 0, 1
- // 1:1 assignment
- for i, id := range spec.Names {
- if !isBlankIdent(id) {
- fn.addLocalForIdent(id)
- }
- lval := b.addr(fn, id, false) // non-escaping
- b.assign(fn, lval, spec.Values[i], true, nil, spec)
- }
-
- case len(spec.Values) == 0:
- // e.g. var x, y int
- // Locals are implicitly zero-initialized.
- for _, id := range spec.Names {
- if !isBlankIdent(id) {
- lhs := fn.addLocalForIdent(id)
- if fn.debugInfo() {
- emitDebugRef(fn, id, lhs, true)
- }
- }
- }
-
- default:
- // e.g. var x, y = pos()
- tuple := b.exprN(fn, spec.Values[0])
- for i, id := range spec.Names {
- if !isBlankIdent(id) {
- fn.addLocalForIdent(id)
- lhs := b.addr(fn, id, false) // non-escaping
- lhs.store(fn, emitExtract(fn, tuple, i, id), id)
- }
- }
- }
-}
-
-// assignStmt emits code to fn for a parallel assignment of rhss to lhss.
-// isDef is true if this is a short variable declaration (:=).
-//
-// Note the similarity with localValueSpec.
-//
-func (b *builder) assignStmt(fn *Function, lhss, rhss []ast.Expr, isDef bool, source ast.Node) {
- // Side effects of all LHSs and RHSs must occur in left-to-right order.
- lvals := make([]lvalue, len(lhss))
- isZero := make([]bool, len(lhss))
- for i, lhs := range lhss {
- var lval lvalue = blank{}
- if !isBlankIdent(lhs) {
- if isDef {
- if obj := fn.Pkg.info.Defs[lhs.(*ast.Ident)]; obj != nil {
- fn.addNamedLocal(obj, lhs)
- isZero[i] = true
- }
- }
- lval = b.addr(fn, lhs, false) // non-escaping
- }
- lvals[i] = lval
- }
- if len(lhss) == len(rhss) {
- // Simple assignment: x = f() (!isDef)
- // Parallel assignment: x, y = f(), g() (!isDef)
- // or short var decl: x, y := f(), g() (isDef)
- //
- // In all cases, the RHSs may refer to the LHSs,
- // so we need a storebuf.
- var sb storebuf
- for i := range rhss {
- b.assign(fn, lvals[i], rhss[i], isZero[i], &sb, source)
- }
- sb.emit(fn)
- } else {
- // e.g. x, y = pos()
- tuple := b.exprN(fn, rhss[0])
- emitDebugRef(fn, rhss[0], tuple, false)
- for i, lval := range lvals {
- lval.store(fn, emitExtract(fn, tuple, i, source), source)
- }
- }
-}
-
-// arrayLen returns the length of the array whose composite literal elements are elts.
-func (b *builder) arrayLen(fn *Function, elts []ast.Expr) int64 {
- var max int64 = -1
- var i int64 = -1
- for _, e := range elts {
- if kv, ok := e.(*ast.KeyValueExpr); ok {
- i = b.expr(fn, kv.Key).(*Const).Int64()
- } else {
- i++
- }
- if i > max {
- max = i
- }
- }
- return max + 1
-}
-
-// compLit emits to fn code to initialize a composite literal e at
-// address addr with type typ.
-//
-// Nested composite literals are recursively initialized in place
-// where possible. If isZero is true, compLit assumes that addr
-// holds the zero value for typ.
-//
-// Because the elements of a composite literal may refer to the
-// variables being updated, as in the second line below,
-// x := T{a: 1}
-// x = T{a: x.a}
-// all the reads must occur before all the writes. Thus all stores to
-// loc are emitted to the storebuf sb for later execution.
-//
-// A CompositeLit may have pointer type only in the recursive (nested)
-// case when the type name is implicit. e.g. in []*T{{}}, the inner
-// literal has type *T behaves like &T{}.
-// In that case, addr must hold a T, not a *T.
-//
-func (b *builder) compLit(fn *Function, addr Value, e *ast.CompositeLit, isZero bool, sb *storebuf) {
- typ := deref(fn.Pkg.typeOf(e))
- switch t := typ.Underlying().(type) {
- case *types.Struct:
- if !isZero && len(e.Elts) != t.NumFields() {
- // memclear
- sb.store(&address{addr, nil}, zeroValue(fn, deref(addr.Type()), e), e)
- isZero = true
- }
- for i, e := range e.Elts {
- fieldIndex := i
- if kv, ok := e.(*ast.KeyValueExpr); ok {
- fname := kv.Key.(*ast.Ident).Name
- for i, n := 0, t.NumFields(); i < n; i++ {
- sf := t.Field(i)
- if sf.Name() == fname {
- fieldIndex = i
- e = kv.Value
- break
- }
- }
- }
- sf := t.Field(fieldIndex)
- faddr := &FieldAddr{
- X: addr,
- Field: fieldIndex,
- }
- faddr.setType(types.NewPointer(sf.Type()))
- fn.emit(faddr, e)
- b.assign(fn, &address{addr: faddr, expr: e}, e, isZero, sb, e)
- }
-
- case *types.Array, *types.Slice:
- var at *types.Array
- var array Value
- switch t := t.(type) {
- case *types.Slice:
- at = types.NewArray(t.Elem(), b.arrayLen(fn, e.Elts))
- alloc := emitNew(fn, at, e)
- array = alloc
- case *types.Array:
- at = t
- array = addr
-
- if !isZero && int64(len(e.Elts)) != at.Len() {
- // memclear
- sb.store(&address{array, nil}, zeroValue(fn, deref(array.Type()), e), e)
- }
- }
-
- var idx *Const
- for _, e := range e.Elts {
- if kv, ok := e.(*ast.KeyValueExpr); ok {
- idx = b.expr(fn, kv.Key).(*Const)
- e = kv.Value
- } else {
- var idxval int64
- if idx != nil {
- idxval = idx.Int64() + 1
- }
- idx = emitConst(fn, intConst(idxval))
- }
- iaddr := &IndexAddr{
- X: array,
- Index: idx,
- }
- iaddr.setType(types.NewPointer(at.Elem()))
- fn.emit(iaddr, e)
- if t != at { // slice
- // backing array is unaliased => storebuf not needed.
- b.assign(fn, &address{addr: iaddr, expr: e}, e, true, nil, e)
- } else {
- b.assign(fn, &address{addr: iaddr, expr: e}, e, true, sb, e)
- }
- }
-
- if t != at { // slice
- s := &Slice{X: array}
- s.setType(typ)
- sb.store(&address{addr: addr, expr: e}, fn.emit(s, e), e)
- }
-
- case *types.Map:
- m := &MakeMap{Reserve: emitConst(fn, intConst(int64(len(e.Elts))))}
- m.setType(typ)
- fn.emit(m, e)
- for _, e := range e.Elts {
- e := e.(*ast.KeyValueExpr)
-
- // If a key expression in a map literal is itself a
- // composite literal, the type may be omitted.
- // For example:
- // map[*struct{}]bool{{}: true}
- // An &-operation may be implied:
- // map[*struct{}]bool{&struct{}{}: true}
- var key Value
- if _, ok := unparen(e.Key).(*ast.CompositeLit); ok && isPointer(t.Key()) {
- // A CompositeLit never evaluates to a pointer,
- // so if the type of the location is a pointer,
- // an &-operation is implied.
- key = b.addr(fn, e.Key, true).address(fn)
- } else {
- key = b.expr(fn, e.Key)
- }
-
- loc := element{
- m: m,
- k: emitConv(fn, key, t.Key(), e),
- t: t.Elem(),
- }
-
- // We call assign() only because it takes care
- // of any &-operation required in the recursive
- // case, e.g.,
- // map[int]*struct{}{0: {}} implies &struct{}{}.
- // In-place update is of course impossible,
- // and no storebuf is needed.
- b.assign(fn, &loc, e.Value, true, nil, e)
- }
- sb.store(&address{addr: addr, expr: e}, m, e)
-
- default:
- panic("unexpected CompositeLit type: " + t.String())
- }
-}
-
-func (b *builder) switchStmt(fn *Function, s *ast.SwitchStmt, label *lblock) {
- if s.Tag == nil {
- b.switchStmtDynamic(fn, s, label)
- return
- }
- dynamic := false
- for _, iclause := range s.Body.List {
- clause := iclause.(*ast.CaseClause)
- for _, cond := range clause.List {
- if fn.Pkg.info.Types[unparen(cond)].Value == nil {
- dynamic = true
- break
- }
- }
- }
-
- if dynamic {
- b.switchStmtDynamic(fn, s, label)
- return
- }
-
- if s.Init != nil {
- b.stmt(fn, s.Init)
- }
-
- entry := fn.currentBlock
- tag := b.expr(fn, s.Tag)
-
- heads := make([]*BasicBlock, 0, len(s.Body.List))
- bodies := make([]*BasicBlock, len(s.Body.List))
- conds := make([]Value, 0, len(s.Body.List))
-
- hasDefault := false
- done := fn.newBasicBlock(fmt.Sprintf("switch.done"))
- if label != nil {
- label._break = done
- }
- for i, stmt := range s.Body.List {
- body := fn.newBasicBlock(fmt.Sprintf("switch.body.%d", i))
- bodies[i] = body
- cas := stmt.(*ast.CaseClause)
- if cas.List == nil {
- // default branch
- hasDefault = true
- head := fn.newBasicBlock(fmt.Sprintf("switch.head.%d", i))
- conds = append(conds, nil)
- heads = append(heads, head)
- fn.currentBlock = head
- emitJump(fn, body, cas)
- }
- for j, cond := range stmt.(*ast.CaseClause).List {
- fn.currentBlock = entry
- head := fn.newBasicBlock(fmt.Sprintf("switch.head.%d.%d", i, j))
- conds = append(conds, b.expr(fn, cond))
- heads = append(heads, head)
- fn.currentBlock = head
- emitJump(fn, body, cond)
- }
- }
-
- for i, stmt := range s.Body.List {
- clause := stmt.(*ast.CaseClause)
- body := bodies[i]
- fn.currentBlock = body
- fallthru := done
- if i+1 < len(bodies) {
- fallthru = bodies[i+1]
- }
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- _fallthrough: fallthru,
- }
- b.stmtList(fn, clause.Body)
- fn.targets = fn.targets.tail
- emitJump(fn, done, stmt)
- }
-
- if !hasDefault {
- head := fn.newBasicBlock(fmt.Sprintf("switch.head.implicit-default"))
- body := fn.newBasicBlock("switch.body.implicit-default")
- fn.currentBlock = head
- emitJump(fn, body, s)
- fn.currentBlock = body
- emitJump(fn, done, s)
- heads = append(heads, head)
- conds = append(conds, nil)
- }
-
- if len(heads) != len(conds) {
- panic(fmt.Sprintf("internal error: %d heads for %d conds", len(heads), len(conds)))
- }
- for _, head := range heads {
- addEdge(entry, head)
- }
- fn.currentBlock = entry
- entry.emit(&ConstantSwitch{
- Tag: tag,
- Conds: conds,
- }, s)
- fn.currentBlock = done
-}
-
-// switchStmt emits to fn code for the switch statement s, optionally
-// labelled by label.
-//
-func (b *builder) switchStmtDynamic(fn *Function, s *ast.SwitchStmt, label *lblock) {
- // We treat SwitchStmt like a sequential if-else chain.
- // Multiway dispatch can be recovered later by irutil.Switches()
- // to those cases that are free of side effects.
- if s.Init != nil {
- b.stmt(fn, s.Init)
- }
- kTrue := emitConst(fn, NewConst(constant.MakeBool(true), tBool))
-
- var tagv Value = kTrue
- var tagSource ast.Node = s
- if s.Tag != nil {
- tagv = b.expr(fn, s.Tag)
- tagSource = s.Tag
- }
- // lifting only considers loads and stores, but we want different
- // sigma nodes for the different comparisons. use a temporary and
- // load it in every branch.
- tag := fn.addLocal(tagv.Type(), tagSource)
- emitStore(fn, tag, tagv, tagSource)
-
- done := fn.newBasicBlock("switch.done")
- if label != nil {
- label._break = done
- }
- // We pull the default case (if present) down to the end.
- // But each fallthrough label must point to the next
- // body block in source order, so we preallocate a
- // body block (fallthru) for the next case.
- // Unfortunately this makes for a confusing block order.
- var dfltBody *[]ast.Stmt
- var dfltFallthrough *BasicBlock
- var fallthru, dfltBlock *BasicBlock
- ncases := len(s.Body.List)
- for i, clause := range s.Body.List {
- body := fallthru
- if body == nil {
- body = fn.newBasicBlock("switch.body") // first case only
- }
-
- // Preallocate body block for the next case.
- fallthru = done
- if i+1 < ncases {
- fallthru = fn.newBasicBlock("switch.body")
- }
-
- cc := clause.(*ast.CaseClause)
- if cc.List == nil {
- // Default case.
- dfltBody = &cc.Body
- dfltFallthrough = fallthru
- dfltBlock = body
- continue
- }
-
- var nextCond *BasicBlock
- for _, cond := range cc.List {
- nextCond = fn.newBasicBlock("switch.next")
- if tagv == kTrue {
- // emit a proper if/else chain instead of a comparison
- // of a value against true.
- //
- // NOTE(dh): adonovan had a todo saying "don't forget
- // conversions though". As far as I can tell, there
- // aren't any conversions that we need to take care of
- // here. `case bool(a) && bool(b)` as well as `case
- // bool(a && b)` are being taken care of by b.cond,
- // and `case a` where a is not of type bool is
- // invalid.
- b.cond(fn, cond, body, nextCond)
- } else {
- cond := emitCompare(fn, token.EQL, emitLoad(fn, tag, cond), b.expr(fn, cond), cond)
- emitIf(fn, cond, body, nextCond, cond.Source())
- }
-
- fn.currentBlock = nextCond
- }
- fn.currentBlock = body
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- _fallthrough: fallthru,
- }
- b.stmtList(fn, cc.Body)
- fn.targets = fn.targets.tail
- emitJump(fn, done, s)
- fn.currentBlock = nextCond
- }
- if dfltBlock != nil {
- // The lack of a Source for the jump doesn't matter, block
- // fusing will get rid of the jump later.
-
- emitJump(fn, dfltBlock, s)
- fn.currentBlock = dfltBlock
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- _fallthrough: dfltFallthrough,
- }
- b.stmtList(fn, *dfltBody)
- fn.targets = fn.targets.tail
- }
- emitJump(fn, done, s)
- fn.currentBlock = done
-}
-
-func (b *builder) typeSwitchStmt(fn *Function, s *ast.TypeSwitchStmt, label *lblock) {
- if s.Init != nil {
- b.stmt(fn, s.Init)
- }
-
- var tag Value
- switch e := s.Assign.(type) {
- case *ast.ExprStmt: // x.(type)
- tag = b.expr(fn, unparen(e.X).(*ast.TypeAssertExpr).X)
- case *ast.AssignStmt: // y := x.(type)
- tag = b.expr(fn, unparen(e.Rhs[0]).(*ast.TypeAssertExpr).X)
- default:
- panic("unreachable")
- }
- tagPtr := fn.addLocal(tag.Type(), tag.Source())
- emitStore(fn, tagPtr, tag, tag.Source())
-
- // +1 in case there's no explicit default case
- heads := make([]*BasicBlock, 0, len(s.Body.List)+1)
-
- entry := fn.currentBlock
- done := fn.newBasicBlock("done")
- if label != nil {
- label._break = done
- }
-
- // set up type switch and constant switch, populate their conditions
- tswtch := &TypeSwitch{
- Tag: emitLoad(fn, tagPtr, tag.Source()),
- Conds: make([]types.Type, 0, len(s.Body.List)+1),
- }
- cswtch := &ConstantSwitch{
- Conds: make([]Value, 0, len(s.Body.List)+1),
- }
-
- rets := make([]types.Type, 0, len(s.Body.List)+1)
- index := 0
- var default_ *ast.CaseClause
- for _, clause := range s.Body.List {
- cc := clause.(*ast.CaseClause)
- if obj := fn.Pkg.info.Implicits[cc]; obj != nil {
- fn.addNamedLocal(obj, cc)
- }
- if cc.List == nil {
- // default case
- default_ = cc
- } else {
- for _, expr := range cc.List {
- tswtch.Conds = append(tswtch.Conds, fn.Pkg.typeOf(expr))
- cswtch.Conds = append(cswtch.Conds, emitConst(fn, intConst(int64(index))))
- index++
- }
- if len(cc.List) == 1 {
- rets = append(rets, fn.Pkg.typeOf(cc.List[0]))
- } else {
- for range cc.List {
- rets = append(rets, tag.Type())
- }
- }
- }
- }
-
- // default branch
- rets = append(rets, tag.Type())
-
- var vars []*types.Var
- vars = append(vars, varIndex)
- for _, typ := range rets {
- vars = append(vars, anonVar(typ))
- }
- tswtch.setType(types.NewTuple(vars...))
- // default branch
- fn.currentBlock = entry
- fn.emit(tswtch, s)
- cswtch.Conds = append(cswtch.Conds, emitConst(fn, intConst(int64(-1))))
- // in theory we should add a local and stores/loads for tswtch, to
- // generate sigma nodes in the branches. however, there isn't any
- // useful information we could possibly attach to it.
- cswtch.Tag = emitExtract(fn, tswtch, 0, s)
- fn.emit(cswtch, s)
-
- // build heads and bodies
- index = 0
- for _, clause := range s.Body.List {
- cc := clause.(*ast.CaseClause)
- if cc.List == nil {
- continue
- }
-
- body := fn.newBasicBlock("typeswitch.body")
- for _, expr := range cc.List {
- head := fn.newBasicBlock("typeswitch.head")
- heads = append(heads, head)
- fn.currentBlock = head
-
- if obj := fn.Pkg.info.Implicits[cc]; obj != nil {
- // In a switch y := x.(type), each case clause
- // implicitly declares a distinct object y.
- // In a single-type case, y has that type.
- // In multi-type cases, 'case nil' and default,
- // y has the same type as the interface operand.
-
- l := fn.objects[obj]
- if rets[index] == tUntypedNil {
- emitStore(fn, l, emitConst(fn, nilConst(tswtch.Tag.Type())), s.Assign)
- } else {
- x := emitExtract(fn, tswtch, index+1, s.Assign)
- emitStore(fn, l, x, nil)
- }
- }
-
- emitJump(fn, body, expr)
- index++
- }
- fn.currentBlock = body
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- }
- b.stmtList(fn, cc.Body)
- fn.targets = fn.targets.tail
- emitJump(fn, done, clause)
- }
-
- if default_ == nil {
- // implicit default
- heads = append(heads, done)
- } else {
- body := fn.newBasicBlock("typeswitch.default")
- heads = append(heads, body)
- fn.currentBlock = body
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- }
- if obj := fn.Pkg.info.Implicits[default_]; obj != nil {
- l := fn.objects[obj]
- x := emitExtract(fn, tswtch, index+1, s.Assign)
- emitStore(fn, l, x, s)
- }
- b.stmtList(fn, default_.Body)
- fn.targets = fn.targets.tail
- emitJump(fn, done, s)
- }
-
- fn.currentBlock = entry
- for _, head := range heads {
- addEdge(entry, head)
- }
- fn.currentBlock = done
-}
-
-// selectStmt emits to fn code for the select statement s, optionally
-// labelled by label.
-//
-func (b *builder) selectStmt(fn *Function, s *ast.SelectStmt, label *lblock) (noreturn bool) {
- if len(s.Body.List) == 0 {
- instr := &Select{Blocking: true}
- instr.setType(types.NewTuple(varIndex, varOk))
- fn.emit(instr, s)
- fn.emit(new(Unreachable), s)
- addEdge(fn.currentBlock, fn.Exit)
- return true
- }
-
- // A blocking select of a single case degenerates to a
- // simple send or receive.
- // TODO(adonovan): opt: is this optimization worth its weight?
- if len(s.Body.List) == 1 {
- clause := s.Body.List[0].(*ast.CommClause)
- if clause.Comm != nil {
- b.stmt(fn, clause.Comm)
- done := fn.newBasicBlock("select.done")
- if label != nil {
- label._break = done
- }
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- }
- b.stmtList(fn, clause.Body)
- fn.targets = fn.targets.tail
- emitJump(fn, done, clause)
- fn.currentBlock = done
- return false
- }
- }
-
- // First evaluate all channels in all cases, and find
- // the directions of each state.
- var states []*SelectState
- blocking := true
- debugInfo := fn.debugInfo()
- for _, clause := range s.Body.List {
- var st *SelectState
- switch comm := clause.(*ast.CommClause).Comm.(type) {
- case nil: // default case
- blocking = false
- continue
-
- case *ast.SendStmt: // ch<- i
- ch := b.expr(fn, comm.Chan)
- st = &SelectState{
- Dir: types.SendOnly,
- Chan: ch,
- Send: emitConv(fn, b.expr(fn, comm.Value),
- ch.Type().Underlying().(*types.Chan).Elem(), comm),
- Pos: comm.Arrow,
- }
- if debugInfo {
- st.DebugNode = comm
- }
-
- case *ast.AssignStmt: // x := <-ch
- recv := unparen(comm.Rhs[0]).(*ast.UnaryExpr)
- st = &SelectState{
- Dir: types.RecvOnly,
- Chan: b.expr(fn, recv.X),
- Pos: recv.OpPos,
- }
- if debugInfo {
- st.DebugNode = recv
- }
-
- case *ast.ExprStmt: // <-ch
- recv := unparen(comm.X).(*ast.UnaryExpr)
- st = &SelectState{
- Dir: types.RecvOnly,
- Chan: b.expr(fn, recv.X),
- Pos: recv.OpPos,
- }
- if debugInfo {
- st.DebugNode = recv
- }
- }
- states = append(states, st)
- }
-
- // We dispatch on the (fair) result of Select using a
- // switch on the returned index.
- sel := &Select{
- States: states,
- Blocking: blocking,
- }
- sel.source = s
- var vars []*types.Var
- vars = append(vars, varIndex, varOk)
- for _, st := range states {
- if st.Dir == types.RecvOnly {
- tElem := st.Chan.Type().Underlying().(*types.Chan).Elem()
- vars = append(vars, anonVar(tElem))
- }
- }
- sel.setType(types.NewTuple(vars...))
- fn.emit(sel, s)
- idx := emitExtract(fn, sel, 0, s)
-
- done := fn.newBasicBlock("select.done")
- if label != nil {
- label._break = done
- }
-
- entry := fn.currentBlock
- swtch := &ConstantSwitch{
- Tag: idx,
- // one condition per case
- Conds: make([]Value, 0, len(s.Body.List)+1),
- }
- // note that we don't need heads; a select case can only have a single condition
- var bodies []*BasicBlock
-
- state := 0
- r := 2 // index in 'sel' tuple of value; increments if st.Dir==RECV
- for _, cc := range s.Body.List {
- clause := cc.(*ast.CommClause)
- if clause.Comm == nil {
- body := fn.newBasicBlock("select.default")
- fn.currentBlock = body
- bodies = append(bodies, body)
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- }
- b.stmtList(fn, clause.Body)
- emitJump(fn, done, s)
- fn.targets = fn.targets.tail
- swtch.Conds = append(swtch.Conds, emitConst(fn, intConst(-1)))
- continue
- }
- swtch.Conds = append(swtch.Conds, emitConst(fn, intConst(int64(state))))
- body := fn.newBasicBlock("select.body")
- fn.currentBlock = body
- bodies = append(bodies, body)
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- }
- switch comm := clause.Comm.(type) {
- case *ast.ExprStmt: // <-ch
- if debugInfo {
- v := emitExtract(fn, sel, r, comm)
- emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false)
- }
- r++
-
- case *ast.AssignStmt: // x := <-states[state].Chan
- if comm.Tok == token.DEFINE {
- fn.addLocalForIdent(comm.Lhs[0].(*ast.Ident))
- }
- x := b.addr(fn, comm.Lhs[0], false) // non-escaping
- v := emitExtract(fn, sel, r, comm)
- if debugInfo {
- emitDebugRef(fn, states[state].DebugNode.(ast.Expr), v, false)
- }
- x.store(fn, v, comm)
-
- if len(comm.Lhs) == 2 { // x, ok := ...
- if comm.Tok == token.DEFINE {
- fn.addLocalForIdent(comm.Lhs[1].(*ast.Ident))
- }
- ok := b.addr(fn, comm.Lhs[1], false) // non-escaping
- ok.store(fn, emitExtract(fn, sel, 1, comm), comm)
- }
- r++
- }
- b.stmtList(fn, clause.Body)
- fn.targets = fn.targets.tail
- emitJump(fn, done, s)
- state++
- }
- fn.currentBlock = entry
- fn.emit(swtch, s)
- for _, body := range bodies {
- addEdge(entry, body)
- }
- fn.currentBlock = done
- return false
-}
-
-// forStmt emits to fn code for the for statement s, optionally
-// labelled by label.
-//
-func (b *builder) forStmt(fn *Function, s *ast.ForStmt, label *lblock) {
- // ...init...
- // jump loop
- // loop:
- // if cond goto body else done
- // body:
- // ...body...
- // jump post
- // post: (target of continue)
- // ...post...
- // jump loop
- // done: (target of break)
- if s.Init != nil {
- b.stmt(fn, s.Init)
- }
- body := fn.newBasicBlock("for.body")
- done := fn.newBasicBlock("for.done") // target of 'break'
- loop := body // target of back-edge
- if s.Cond != nil {
- loop = fn.newBasicBlock("for.loop")
- }
- cont := loop // target of 'continue'
- if s.Post != nil {
- cont = fn.newBasicBlock("for.post")
- }
- if label != nil {
- label._break = done
- label._continue = cont
- }
- emitJump(fn, loop, s)
- fn.currentBlock = loop
- if loop != body {
- b.cond(fn, s.Cond, body, done)
- fn.currentBlock = body
- }
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- _continue: cont,
- }
- b.stmt(fn, s.Body)
- fn.targets = fn.targets.tail
- emitJump(fn, cont, s)
-
- if s.Post != nil {
- fn.currentBlock = cont
- b.stmt(fn, s.Post)
- emitJump(fn, loop, s) // back-edge
- }
- fn.currentBlock = done
-}
-
-// rangeIndexed emits to fn the header for an integer-indexed loop
-// over array, *array or slice value x.
-// The v result is defined only if tv is non-nil.
-// forPos is the position of the "for" token.
-//
-func (b *builder) rangeIndexed(fn *Function, x Value, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) {
- //
- // length = len(x)
- // index = -1
- // loop: (target of continue)
- // index++
- // if index < length goto body else done
- // body:
- // k = index
- // v = x[index]
- // ...body...
- // jump loop
- // done: (target of break)
-
- // Determine number of iterations.
- var length Value
- if arr, ok := deref(x.Type()).Underlying().(*types.Array); ok {
- // For array or *array, the number of iterations is
- // known statically thanks to the type. We avoid a
- // data dependence upon x, permitting later dead-code
- // elimination if x is pure, static unrolling, etc.
- // Ranging over a nil *array may have >0 iterations.
- // We still generate code for x, in case it has effects.
- length = emitConst(fn, intConst(arr.Len()))
- } else {
- // length = len(x).
- var c Call
- c.Call.Value = makeLen(x.Type())
- c.Call.Args = []Value{x}
- c.setType(tInt)
- length = fn.emit(&c, source)
- }
-
- index := fn.addLocal(tInt, source)
- emitStore(fn, index, emitConst(fn, intConst(-1)), source)
-
- loop = fn.newBasicBlock("rangeindex.loop")
- emitJump(fn, loop, source)
- fn.currentBlock = loop
-
- incr := &BinOp{
- Op: token.ADD,
- X: emitLoad(fn, index, source),
- Y: emitConst(fn, intConst(1)),
- }
- incr.setType(tInt)
- emitStore(fn, index, fn.emit(incr, source), source)
-
- body := fn.newBasicBlock("rangeindex.body")
- done = fn.newBasicBlock("rangeindex.done")
- emitIf(fn, emitCompare(fn, token.LSS, incr, length, source), body, done, source)
- fn.currentBlock = body
-
- k = emitLoad(fn, index, source)
- if tv != nil {
- switch t := x.Type().Underlying().(type) {
- case *types.Array:
- instr := &Index{
- X: x,
- Index: k,
- }
- instr.setType(t.Elem())
- v = fn.emit(instr, source)
-
- case *types.Pointer: // *array
- instr := &IndexAddr{
- X: x,
- Index: k,
- }
- instr.setType(types.NewPointer(t.Elem().Underlying().(*types.Array).Elem()))
- v = emitLoad(fn, fn.emit(instr, source), source)
-
- case *types.Slice:
- instr := &IndexAddr{
- X: x,
- Index: k,
- }
- instr.setType(types.NewPointer(t.Elem()))
- v = emitLoad(fn, fn.emit(instr, source), source)
-
- default:
- panic("rangeIndexed x:" + t.String())
- }
- }
- return
-}
-
-// rangeIter emits to fn the header for a loop using
-// Range/Next/Extract to iterate over map or string value x.
-// tk and tv are the types of the key/value results k and v, or nil
-// if the respective component is not wanted.
-//
-func (b *builder) rangeIter(fn *Function, x Value, tk, tv types.Type, source ast.Node) (k, v Value, loop, done *BasicBlock) {
- //
- // it = range x
- // loop: (target of continue)
- // okv = next it (ok, key, value)
- // ok = extract okv #0
- // if ok goto body else done
- // body:
- // k = extract okv #1
- // v = extract okv #2
- // ...body...
- // jump loop
- // done: (target of break)
- //
-
- if tk == nil {
- tk = tInvalid
- }
- if tv == nil {
- tv = tInvalid
- }
-
- rng := &Range{X: x}
- rng.setType(tRangeIter)
- it := fn.emit(rng, source)
-
- loop = fn.newBasicBlock("rangeiter.loop")
- emitJump(fn, loop, source)
- fn.currentBlock = loop
-
- _, isString := x.Type().Underlying().(*types.Basic)
-
- okv := &Next{
- Iter: it,
- IsString: isString,
- }
- okv.setType(types.NewTuple(
- varOk,
- newVar("k", tk),
- newVar("v", tv),
- ))
- fn.emit(okv, source)
-
- body := fn.newBasicBlock("rangeiter.body")
- done = fn.newBasicBlock("rangeiter.done")
- emitIf(fn, emitExtract(fn, okv, 0, source), body, done, source)
- fn.currentBlock = body
-
- if tk != tInvalid {
- k = emitExtract(fn, okv, 1, source)
- }
- if tv != tInvalid {
- v = emitExtract(fn, okv, 2, source)
- }
- return
-}
-
-// rangeChan emits to fn the header for a loop that receives from
-// channel x until it fails.
-// tk is the channel's element type, or nil if the k result is
-// not wanted
-// pos is the position of the '=' or ':=' token.
-//
-func (b *builder) rangeChan(fn *Function, x Value, tk types.Type, source ast.Node) (k Value, loop, done *BasicBlock) {
- //
- // loop: (target of continue)
- // ko = <-x (key, ok)
- // ok = extract ko #1
- // if ok goto body else done
- // body:
- // k = extract ko #0
- // ...
- // goto loop
- // done: (target of break)
-
- loop = fn.newBasicBlock("rangechan.loop")
- emitJump(fn, loop, source)
- fn.currentBlock = loop
- retv := emitRecv(fn, x, true, types.NewTuple(newVar("k", x.Type().Underlying().(*types.Chan).Elem()), varOk), source)
- body := fn.newBasicBlock("rangechan.body")
- done = fn.newBasicBlock("rangechan.done")
- emitIf(fn, emitExtract(fn, retv, 1, source), body, done, source)
- fn.currentBlock = body
- if tk != nil {
- k = emitExtract(fn, retv, 0, source)
- }
- return
-}
-
-// rangeStmt emits to fn code for the range statement s, optionally
-// labelled by label.
-//
-func (b *builder) rangeStmt(fn *Function, s *ast.RangeStmt, label *lblock, source ast.Node) {
- var tk, tv types.Type
- if s.Key != nil && !isBlankIdent(s.Key) {
- tk = fn.Pkg.typeOf(s.Key)
- }
- if s.Value != nil && !isBlankIdent(s.Value) {
- tv = fn.Pkg.typeOf(s.Value)
- }
-
- // If iteration variables are defined (:=), this
- // occurs once outside the loop.
- //
- // Unlike a short variable declaration, a RangeStmt
- // using := never redeclares an existing variable; it
- // always creates a new one.
- if s.Tok == token.DEFINE {
- if tk != nil {
- fn.addLocalForIdent(s.Key.(*ast.Ident))
- }
- if tv != nil {
- fn.addLocalForIdent(s.Value.(*ast.Ident))
- }
- }
-
- x := b.expr(fn, s.X)
-
- var k, v Value
- var loop, done *BasicBlock
- switch rt := x.Type().Underlying().(type) {
- case *types.Slice, *types.Array, *types.Pointer: // *array
- k, v, loop, done = b.rangeIndexed(fn, x, tv, source)
-
- case *types.Chan:
- k, loop, done = b.rangeChan(fn, x, tk, source)
-
- case *types.Map, *types.Basic: // string
- k, v, loop, done = b.rangeIter(fn, x, tk, tv, source)
-
- default:
- panic("Cannot range over: " + rt.String())
- }
-
- // Evaluate both LHS expressions before we update either.
- var kl, vl lvalue
- if tk != nil {
- kl = b.addr(fn, s.Key, false) // non-escaping
- }
- if tv != nil {
- vl = b.addr(fn, s.Value, false) // non-escaping
- }
- if tk != nil {
- kl.store(fn, k, s)
- }
- if tv != nil {
- vl.store(fn, v, s)
- }
-
- if label != nil {
- label._break = done
- label._continue = loop
- }
-
- fn.targets = &targets{
- tail: fn.targets,
- _break: done,
- _continue: loop,
- }
- b.stmt(fn, s.Body)
- fn.targets = fn.targets.tail
- emitJump(fn, loop, source) // back-edge
- fn.currentBlock = done
-}
-
-// stmt lowers statement s to IR form, emitting code to fn.
-func (b *builder) stmt(fn *Function, _s ast.Stmt) {
- // The label of the current statement. If non-nil, its _goto
- // target is always set; its _break and _continue are set only
- // within the body of switch/typeswitch/select/for/range.
- // It is effectively an additional default-nil parameter of stmt().
- var label *lblock
-start:
- switch s := _s.(type) {
- case *ast.EmptyStmt:
- // ignore. (Usually removed by gofmt.)
-
- case *ast.DeclStmt: // Con, Var or Typ
- d := s.Decl.(*ast.GenDecl)
- if d.Tok == token.VAR {
- for _, spec := range d.Specs {
- if vs, ok := spec.(*ast.ValueSpec); ok {
- b.localValueSpec(fn, vs)
- }
- }
- }
-
- case *ast.LabeledStmt:
- label = fn.labelledBlock(s.Label)
- emitJump(fn, label._goto, s)
- fn.currentBlock = label._goto
- _s = s.Stmt
- goto start // effectively: tailcall stmt(fn, s.Stmt, label)
-
- case *ast.ExprStmt:
- b.expr(fn, s.X)
-
- case *ast.SendStmt:
- instr := &Send{
- Chan: b.expr(fn, s.Chan),
- X: emitConv(fn, b.expr(fn, s.Value),
- fn.Pkg.typeOf(s.Chan).Underlying().(*types.Chan).Elem(), s),
- }
- fn.emit(instr, s)
-
- case *ast.IncDecStmt:
- op := token.ADD
- if s.Tok == token.DEC {
- op = token.SUB
- }
- loc := b.addr(fn, s.X, false)
- b.assignOp(fn, loc, emitConst(fn, NewConst(constant.MakeInt64(1), loc.typ())), op, s)
-
- case *ast.AssignStmt:
- switch s.Tok {
- case token.ASSIGN, token.DEFINE:
- b.assignStmt(fn, s.Lhs, s.Rhs, s.Tok == token.DEFINE, _s)
-
- default: // +=, etc.
- op := s.Tok + token.ADD - token.ADD_ASSIGN
- b.assignOp(fn, b.addr(fn, s.Lhs[0], false), b.expr(fn, s.Rhs[0]), op, s)
- }
-
- case *ast.GoStmt:
- // The "intrinsics" new/make/len/cap are forbidden here.
- // panic is treated like an ordinary function call.
- v := Go{}
- b.setCall(fn, s.Call, &v.Call)
- fn.emit(&v, s)
-
- case *ast.DeferStmt:
- // The "intrinsics" new/make/len/cap are forbidden here.
- // panic is treated like an ordinary function call.
- v := Defer{}
- b.setCall(fn, s.Call, &v.Call)
- fn.hasDefer = true
- fn.emit(&v, s)
-
- case *ast.ReturnStmt:
- // TODO(dh): we could emit tigher position information by
- // using the ith returned expression
-
- var results []Value
- if len(s.Results) == 1 && fn.Signature.Results().Len() > 1 {
- // Return of one expression in a multi-valued function.
- tuple := b.exprN(fn, s.Results[0])
- ttuple := tuple.Type().(*types.Tuple)
- for i, n := 0, ttuple.Len(); i < n; i++ {
- results = append(results,
- emitConv(fn, emitExtract(fn, tuple, i, s),
- fn.Signature.Results().At(i).Type(), s))
- }
- } else {
- // 1:1 return, or no-arg return in non-void function.
- for i, r := range s.Results {
- v := emitConv(fn, b.expr(fn, r), fn.Signature.Results().At(i).Type(), s)
- results = append(results, v)
- }
- }
-
- ret := fn.results()
- for i, r := range results {
- emitStore(fn, ret[i], r, s)
- }
-
- emitJump(fn, fn.Exit, s)
- fn.currentBlock = fn.newBasicBlock("unreachable")
-
- case *ast.BranchStmt:
- var block *BasicBlock
- switch s.Tok {
- case token.BREAK:
- if s.Label != nil {
- block = fn.labelledBlock(s.Label)._break
- } else {
- for t := fn.targets; t != nil && block == nil; t = t.tail {
- block = t._break
- }
- }
-
- case token.CONTINUE:
- if s.Label != nil {
- block = fn.labelledBlock(s.Label)._continue
- } else {
- for t := fn.targets; t != nil && block == nil; t = t.tail {
- block = t._continue
- }
- }
-
- case token.FALLTHROUGH:
- for t := fn.targets; t != nil && block == nil; t = t.tail {
- block = t._fallthrough
- }
-
- case token.GOTO:
- block = fn.labelledBlock(s.Label)._goto
- }
- j := emitJump(fn, block, s)
- j.Comment = s.Tok.String()
- fn.currentBlock = fn.newBasicBlock("unreachable")
-
- case *ast.BlockStmt:
- b.stmtList(fn, s.List)
-
- case *ast.IfStmt:
- if s.Init != nil {
- b.stmt(fn, s.Init)
- }
- then := fn.newBasicBlock("if.then")
- done := fn.newBasicBlock("if.done")
- els := done
- if s.Else != nil {
- els = fn.newBasicBlock("if.else")
- }
- instr := b.cond(fn, s.Cond, then, els)
- instr.source = s
- fn.currentBlock = then
- b.stmt(fn, s.Body)
- emitJump(fn, done, s)
-
- if s.Else != nil {
- fn.currentBlock = els
- b.stmt(fn, s.Else)
- emitJump(fn, done, s)
- }
-
- fn.currentBlock = done
-
- case *ast.SwitchStmt:
- b.switchStmt(fn, s, label)
-
- case *ast.TypeSwitchStmt:
- b.typeSwitchStmt(fn, s, label)
-
- case *ast.SelectStmt:
- if b.selectStmt(fn, s, label) {
- // the select has no cases, it blocks forever
- fn.currentBlock = fn.newBasicBlock("unreachable")
- }
-
- case *ast.ForStmt:
- b.forStmt(fn, s, label)
-
- case *ast.RangeStmt:
- b.rangeStmt(fn, s, label, s)
-
- default:
- panic(fmt.Sprintf("unexpected statement kind: %T", s))
- }
-}
-
-// buildFunction builds IR code for the body of function fn. Idempotent.
-func (b *builder) buildFunction(fn *Function) {
- if fn.Blocks != nil {
- return // building already started
- }
-
- var recvField *ast.FieldList
- var body *ast.BlockStmt
- var functype *ast.FuncType
- switch n := fn.source.(type) {
- case nil:
- return // not a Go source function. (Synthetic, or from object file.)
- case *ast.FuncDecl:
- functype = n.Type
- recvField = n.Recv
- body = n.Body
- case *ast.FuncLit:
- functype = n.Type
- body = n.Body
- default:
- panic(n)
- }
-
- if fn.Package().Pkg.Path() == "syscall" && fn.Name() == "Exit" {
- // syscall.Exit is a stub and the way os.Exit terminates the
- // process. Note that there are other functions in the runtime
- // that also terminate or unwind that we cannot analyze.
- // However, they aren't stubs, so buildExits ends up getting
- // called on them, so that's where we handle those special
- // cases.
- fn.WillExit = true
- }
-
- if body == nil {
- // External function.
- if fn.Params == nil {
- // This condition ensures we add a non-empty
- // params list once only, but we may attempt
- // the degenerate empty case repeatedly.
- // TODO(adonovan): opt: don't do that.
-
- // We set Function.Params even though there is no body
- // code to reference them. This simplifies clients.
- if recv := fn.Signature.Recv(); recv != nil {
- // XXX synthesize an ast.Node
- fn.addParamObj(recv, nil)
- }
- params := fn.Signature.Params()
- for i, n := 0, params.Len(); i < n; i++ {
- // XXX synthesize an ast.Node
- fn.addParamObj(params.At(i), nil)
- }
- }
- return
- }
- if fn.Prog.mode&LogSource != 0 {
- defer logStack("build function %s @ %s", fn, fn.Prog.Fset.Position(fn.Pos()))()
- }
- fn.blocksets = b.blocksets
- fn.startBody()
- fn.createSyntacticParams(recvField, functype)
- fn.exitBlock()
- b.stmt(fn, body)
- if cb := fn.currentBlock; cb != nil && (cb == fn.Blocks[0] || cb.Preds != nil) {
- // Control fell off the end of the function's body block.
- //
- // Block optimizations eliminate the current block, if
- // unreachable. It is a builder invariant that
- // if this no-arg return is ill-typed for
- // fn.Signature.Results, this block must be
- // unreachable. The sanity checker checks this.
- // fn.emit(new(RunDefers))
- // fn.emit(new(Return))
- emitJump(fn, fn.Exit, nil)
- }
- optimizeBlocks(fn)
- buildFakeExits(fn)
- b.buildExits(fn)
- b.addUnreachables(fn)
- fn.finishBody()
- b.blocksets = fn.blocksets
- fn.functionBody = nil
-}
-
-// buildFuncDecl builds IR code for the function or method declared
-// by decl in package pkg.
-//
-func (b *builder) buildFuncDecl(pkg *Package, decl *ast.FuncDecl) {
- id := decl.Name
- if isBlankIdent(id) {
- return // discard
- }
- fn := pkg.values[pkg.info.Defs[id]].(*Function)
- if decl.Recv == nil && id.Name == "init" {
- var v Call
- v.Call.Value = fn
- v.setType(types.NewTuple())
- pkg.init.emit(&v, decl)
- }
- fn.source = decl
- b.buildFunction(fn)
-}
-
-// Build calls Package.Build for each package in prog.
-//
-// Build is intended for whole-program analysis; a typical compiler
-// need only build a single package.
-//
-// Build is idempotent and thread-safe.
-//
-func (prog *Program) Build() {
- for _, p := range prog.packages {
- p.Build()
- }
-}
-
-// Build builds IR code for all functions and vars in package p.
-//
-// Precondition: CreatePackage must have been called for all of p's
-// direct imports (and hence its direct imports must have been
-// error-free).
-//
-// Build is idempotent and thread-safe.
-//
-func (p *Package) Build() { p.buildOnce.Do(p.build) }
-
-func (p *Package) build() {
- if p.info == nil {
- return // synthetic package, e.g. "testmain"
- }
-
- // Ensure we have runtime type info for all exported members.
- // TODO(adonovan): ideally belongs in memberFromObject, but
- // that would require package creation in topological order.
- for name, mem := range p.Members {
- if ast.IsExported(name) {
- p.Prog.needMethodsOf(mem.Type())
- }
- }
- if p.Prog.mode&LogSource != 0 {
- defer logStack("build %s", p)()
- }
- init := p.init
- init.startBody()
- init.exitBlock()
-
- var done *BasicBlock
-
- // Make init() skip if package is already initialized.
- initguard := p.Var("init$guard")
- doinit := init.newBasicBlock("init.start")
- done = init.Exit
- emitIf(init, emitLoad(init, initguard, nil), done, doinit, nil)
- init.currentBlock = doinit
- emitStore(init, initguard, emitConst(init, NewConst(constant.MakeBool(true), tBool)), nil)
-
- // Call the init() function of each package we import.
- for _, pkg := range p.Pkg.Imports() {
- prereq := p.Prog.packages[pkg]
- if prereq == nil {
- panic(fmt.Sprintf("Package(%q).Build(): unsatisfied import: Program.CreatePackage(%q) was not called", p.Pkg.Path(), pkg.Path()))
- }
- var v Call
- v.Call.Value = prereq.init
- v.setType(types.NewTuple())
- init.emit(&v, nil)
- }
-
- b := builder{
- printFunc: p.printFunc,
- }
-
- // Initialize package-level vars in correct order.
- for _, varinit := range p.info.InitOrder {
- if init.Prog.mode&LogSource != 0 {
- fmt.Fprintf(os.Stderr, "build global initializer %v @ %s\n",
- varinit.Lhs, p.Prog.Fset.Position(varinit.Rhs.Pos()))
- }
- if len(varinit.Lhs) == 1 {
- // 1:1 initialization: var x, y = a(), b()
- var lval lvalue
- if v := varinit.Lhs[0]; v.Name() != "_" {
- lval = &address{addr: p.values[v].(*Global)}
- } else {
- lval = blank{}
- }
- // TODO(dh): do emit position information
- b.assign(init, lval, varinit.Rhs, true, nil, nil)
- } else {
- // n:1 initialization: var x, y := f()
- tuple := b.exprN(init, varinit.Rhs)
- for i, v := range varinit.Lhs {
- if v.Name() == "_" {
- continue
- }
- emitStore(init, p.values[v].(*Global), emitExtract(init, tuple, i, nil), nil)
- }
- }
- }
-
- // Build all package-level functions, init functions
- // and methods, including unreachable/blank ones.
- // We build them in source order, but it's not significant.
- for _, file := range p.files {
- for _, decl := range file.Decls {
- if decl, ok := decl.(*ast.FuncDecl); ok {
- b.buildFuncDecl(p, decl)
- }
- }
- }
-
- // Finish up init().
- emitJump(init, done, nil)
- init.finishBody()
-
- p.info = nil // We no longer need ASTs or go/types deductions.
-
- if p.Prog.mode&SanityCheckFunctions != 0 {
- sanityCheckPackage(p)
- }
-}
-
-// Like ObjectOf, but panics instead of returning nil.
-// Only valid during p's create and build phases.
-func (p *Package) objectOf(id *ast.Ident) types.Object {
- if o := p.info.ObjectOf(id); o != nil {
- return o
- }
- panic(fmt.Sprintf("no types.Object for ast.Ident %s @ %s",
- id.Name, p.Prog.Fset.Position(id.Pos())))
-}
-
-// Like TypeOf, but panics instead of returning nil.
-// Only valid during p's create and build phases.
-func (p *Package) typeOf(e ast.Expr) types.Type {
- if T := p.info.TypeOf(e); T != nil {
- return T
- }
- panic(fmt.Sprintf("no type for %T @ %s",
- e, p.Prog.Fset.Position(e.Pos())))
-}
diff --git a/vendor/honnef.co/go/tools/ir/const.go b/vendor/honnef.co/go/tools/ir/const.go
deleted file mode 100644
index 7cdf006e83a..00000000000
--- a/vendor/honnef.co/go/tools/ir/const.go
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file defines the Const SSA value type.
-
-import (
- "fmt"
- "go/constant"
- "go/types"
- "strconv"
-)
-
-// NewConst returns a new constant of the specified value and type.
-// val must be valid according to the specification of Const.Value.
-//
-func NewConst(val constant.Value, typ types.Type) *Const {
- return &Const{
- register: register{
- typ: typ,
- },
- Value: val,
- }
-}
-
-// intConst returns an 'int' constant that evaluates to i.
-// (i is an int64 in case the host is narrower than the target.)
-func intConst(i int64) *Const {
- return NewConst(constant.MakeInt64(i), tInt)
-}
-
-// nilConst returns a nil constant of the specified type, which may
-// be any reference type, including interfaces.
-//
-func nilConst(typ types.Type) *Const {
- return NewConst(nil, typ)
-}
-
-// stringConst returns a 'string' constant that evaluates to s.
-func stringConst(s string) *Const {
- return NewConst(constant.MakeString(s), tString)
-}
-
-// zeroConst returns a new "zero" constant of the specified type,
-// which must not be an array or struct type: the zero values of
-// aggregates are well-defined but cannot be represented by Const.
-//
-func zeroConst(t types.Type) *Const {
- switch t := t.(type) {
- case *types.Basic:
- switch {
- case t.Info()&types.IsBoolean != 0:
- return NewConst(constant.MakeBool(false), t)
- case t.Info()&types.IsNumeric != 0:
- return NewConst(constant.MakeInt64(0), t)
- case t.Info()&types.IsString != 0:
- return NewConst(constant.MakeString(""), t)
- case t.Kind() == types.UnsafePointer:
- fallthrough
- case t.Kind() == types.UntypedNil:
- return nilConst(t)
- default:
- panic(fmt.Sprint("zeroConst for unexpected type:", t))
- }
- case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
- return nilConst(t)
- case *types.Named:
- return NewConst(zeroConst(t.Underlying()).Value, t)
- case *types.Array, *types.Struct, *types.Tuple:
- panic(fmt.Sprint("zeroConst applied to aggregate:", t))
- }
- panic(fmt.Sprint("zeroConst: unexpected ", t))
-}
-
-func (c *Const) RelString(from *types.Package) string {
- var p string
- if c.Value == nil {
- p = "nil"
- } else if c.Value.Kind() == constant.String {
- v := constant.StringVal(c.Value)
- const max = 20
- // TODO(adonovan): don't cut a rune in half.
- if len(v) > max {
- v = v[:max-3] + "..." // abbreviate
- }
- p = strconv.Quote(v)
- } else {
- p = c.Value.String()
- }
- return fmt.Sprintf("Const <%s> {%s}", relType(c.Type(), from), p)
-}
-
-func (c *Const) String() string {
- return c.RelString(c.Parent().pkg())
-}
-
-// IsNil returns true if this constant represents a typed or untyped nil value.
-func (c *Const) IsNil() bool {
- return c.Value == nil
-}
-
-// Int64 returns the numeric value of this constant truncated to fit
-// a signed 64-bit integer.
-//
-func (c *Const) Int64() int64 {
- switch x := constant.ToInt(c.Value); x.Kind() {
- case constant.Int:
- if i, ok := constant.Int64Val(x); ok {
- return i
- }
- return 0
- case constant.Float:
- f, _ := constant.Float64Val(x)
- return int64(f)
- }
- panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
-}
-
-// Uint64 returns the numeric value of this constant truncated to fit
-// an unsigned 64-bit integer.
-//
-func (c *Const) Uint64() uint64 {
- switch x := constant.ToInt(c.Value); x.Kind() {
- case constant.Int:
- if u, ok := constant.Uint64Val(x); ok {
- return u
- }
- return 0
- case constant.Float:
- f, _ := constant.Float64Val(x)
- return uint64(f)
- }
- panic(fmt.Sprintf("unexpected constant value: %T", c.Value))
-}
-
-// Float64 returns the numeric value of this constant truncated to fit
-// a float64.
-//
-func (c *Const) Float64() float64 {
- f, _ := constant.Float64Val(c.Value)
- return f
-}
-
-// Complex128 returns the complex value of this constant truncated to
-// fit a complex128.
-//
-func (c *Const) Complex128() complex128 {
- re, _ := constant.Float64Val(constant.Real(c.Value))
- im, _ := constant.Float64Val(constant.Imag(c.Value))
- return complex(re, im)
-}
diff --git a/vendor/honnef.co/go/tools/ir/create.go b/vendor/honnef.co/go/tools/ir/create.go
deleted file mode 100644
index ff81a244bdc..00000000000
--- a/vendor/honnef.co/go/tools/ir/create.go
+++ /dev/null
@@ -1,275 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file implements the CREATE phase of IR construction.
-// See builder.go for explanation.
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "os"
- "sync"
-
- "golang.org/x/tools/go/types/typeutil"
-)
-
-// NewProgram returns a new IR Program.
-//
-// mode controls diagnostics and checking during IR construction.
-//
-func NewProgram(fset *token.FileSet, mode BuilderMode) *Program {
- prog := &Program{
- Fset: fset,
- imported: make(map[string]*Package),
- packages: make(map[*types.Package]*Package),
- thunks: make(map[selectionKey]*Function),
- bounds: make(map[*types.Func]*Function),
- mode: mode,
- }
-
- h := typeutil.MakeHasher() // protected by methodsMu, in effect
- prog.methodSets.SetHasher(h)
- prog.canon.SetHasher(h)
-
- return prog
-}
-
-// memberFromObject populates package pkg with a member for the
-// typechecker object obj.
-//
-// For objects from Go source code, syntax is the associated syntax
-// tree (for funcs and vars only); it will be used during the build
-// phase.
-//
-func memberFromObject(pkg *Package, obj types.Object, syntax ast.Node) {
- name := obj.Name()
- switch obj := obj.(type) {
- case *types.Builtin:
- if pkg.Pkg != types.Unsafe {
- panic("unexpected builtin object: " + obj.String())
- }
-
- case *types.TypeName:
- pkg.Members[name] = &Type{
- object: obj,
- pkg: pkg,
- }
-
- case *types.Const:
- c := &NamedConst{
- object: obj,
- Value: NewConst(obj.Val(), obj.Type()),
- pkg: pkg,
- }
- pkg.values[obj] = c.Value
- pkg.Members[name] = c
-
- case *types.Var:
- g := &Global{
- Pkg: pkg,
- name: name,
- object: obj,
- typ: types.NewPointer(obj.Type()), // address
- }
- pkg.values[obj] = g
- pkg.Members[name] = g
-
- case *types.Func:
- sig := obj.Type().(*types.Signature)
- if sig.Recv() == nil && name == "init" {
- pkg.ninit++
- name = fmt.Sprintf("init#%d", pkg.ninit)
- }
- fn := &Function{
- name: name,
- object: obj,
- Signature: sig,
- Pkg: pkg,
- Prog: pkg.Prog,
- }
-
- fn.source = syntax
- fn.initHTML(pkg.printFunc)
- if syntax == nil {
- fn.Synthetic = "loaded from gc object file"
- } else {
- fn.functionBody = new(functionBody)
- }
-
- pkg.values[obj] = fn
- pkg.Functions = append(pkg.Functions, fn)
- if sig.Recv() == nil {
- pkg.Members[name] = fn // package-level function
- }
-
- default: // (incl. *types.Package)
- panic("unexpected Object type: " + obj.String())
- }
-}
-
-// membersFromDecl populates package pkg with members for each
-// typechecker object (var, func, const or type) associated with the
-// specified decl.
-//
-func membersFromDecl(pkg *Package, decl ast.Decl) {
- switch decl := decl.(type) {
- case *ast.GenDecl: // import, const, type or var
- switch decl.Tok {
- case token.CONST:
- for _, spec := range decl.Specs {
- for _, id := range spec.(*ast.ValueSpec).Names {
- if !isBlankIdent(id) {
- memberFromObject(pkg, pkg.info.Defs[id], nil)
- }
- }
- }
-
- case token.VAR:
- for _, spec := range decl.Specs {
- for _, id := range spec.(*ast.ValueSpec).Names {
- if !isBlankIdent(id) {
- memberFromObject(pkg, pkg.info.Defs[id], spec)
- }
- }
- }
-
- case token.TYPE:
- for _, spec := range decl.Specs {
- id := spec.(*ast.TypeSpec).Name
- if !isBlankIdent(id) {
- memberFromObject(pkg, pkg.info.Defs[id], nil)
- }
- }
- }
-
- case *ast.FuncDecl:
- id := decl.Name
- if !isBlankIdent(id) {
- memberFromObject(pkg, pkg.info.Defs[id], decl)
- }
- }
-}
-
-// CreatePackage constructs and returns an IR Package from the
-// specified type-checked, error-free file ASTs, and populates its
-// Members mapping.
-//
-// importable determines whether this package should be returned by a
-// subsequent call to ImportedPackage(pkg.Path()).
-//
-// The real work of building IR form for each function is not done
-// until a subsequent call to Package.Build().
-//
-func (prog *Program) CreatePackage(pkg *types.Package, files []*ast.File, info *types.Info, importable bool) *Package {
- p := &Package{
- Prog: prog,
- Members: make(map[string]Member),
- values: make(map[types.Object]Value),
- Pkg: pkg,
- info: info, // transient (CREATE and BUILD phases)
- files: files, // transient (CREATE and BUILD phases)
- printFunc: prog.PrintFunc,
- }
-
- // Add init() function.
- p.init = &Function{
- name: "init",
- Signature: new(types.Signature),
- Synthetic: "package initializer",
- Pkg: p,
- Prog: prog,
- functionBody: new(functionBody),
- }
- p.init.initHTML(prog.PrintFunc)
- p.Members[p.init.name] = p.init
- p.Functions = append(p.Functions, p.init)
-
- // CREATE phase.
- // Allocate all package members: vars, funcs, consts and types.
- if len(files) > 0 {
- // Go source package.
- for _, file := range files {
- for _, decl := range file.Decls {
- membersFromDecl(p, decl)
- }
- }
- } else {
- // GC-compiled binary package (or "unsafe")
- // No code.
- // No position information.
- scope := p.Pkg.Scope()
- for _, name := range scope.Names() {
- obj := scope.Lookup(name)
- memberFromObject(p, obj, nil)
- if obj, ok := obj.(*types.TypeName); ok {
- if named, ok := obj.Type().(*types.Named); ok {
- for i, n := 0, named.NumMethods(); i < n; i++ {
- memberFromObject(p, named.Method(i), nil)
- }
- }
- }
- }
- }
-
- // Add initializer guard variable.
- initguard := &Global{
- Pkg: p,
- name: "init$guard",
- typ: types.NewPointer(tBool),
- }
- p.Members[initguard.Name()] = initguard
-
- if prog.mode&GlobalDebug != 0 {
- p.SetDebugMode(true)
- }
-
- if prog.mode&PrintPackages != 0 {
- printMu.Lock()
- p.WriteTo(os.Stdout)
- printMu.Unlock()
- }
-
- if importable {
- prog.imported[p.Pkg.Path()] = p
- }
- prog.packages[p.Pkg] = p
-
- return p
-}
-
-// printMu serializes printing of Packages/Functions to stdout.
-var printMu sync.Mutex
-
-// AllPackages returns a new slice containing all packages in the
-// program prog in unspecified order.
-//
-func (prog *Program) AllPackages() []*Package {
- pkgs := make([]*Package, 0, len(prog.packages))
- for _, pkg := range prog.packages {
- pkgs = append(pkgs, pkg)
- }
- return pkgs
-}
-
-// ImportedPackage returns the importable Package whose PkgPath
-// is path, or nil if no such Package has been created.
-//
-// A parameter to CreatePackage determines whether a package should be
-// considered importable. For example, no import declaration can resolve
-// to the ad-hoc main package created by 'go build foo.go'.
-//
-// TODO(adonovan): rethink this function and the "importable" concept;
-// most packages are importable. This function assumes that all
-// types.Package.Path values are unique within the ir.Program, which is
-// false---yet this function remains very convenient.
-// Clients should use (*Program).Package instead where possible.
-// IR doesn't really need a string-keyed map of packages.
-//
-func (prog *Program) ImportedPackage(path string) *Package {
- return prog.imported[path]
-}
diff --git a/vendor/honnef.co/go/tools/ir/doc.go b/vendor/honnef.co/go/tools/ir/doc.go
deleted file mode 100644
index a5f42e4f47d..00000000000
--- a/vendor/honnef.co/go/tools/ir/doc.go
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package ir defines a representation of the elements of Go programs
-// (packages, types, functions, variables and constants) using a
-// static single-information (SSI) form intermediate representation
-// (IR) for the bodies of functions.
-//
-// THIS INTERFACE IS EXPERIMENTAL AND IS LIKELY TO CHANGE.
-//
-// For an introduction to SSA form, upon which SSI builds, see
-// http://en.wikipedia.org/wiki/Static_single_assignment_form.
-// This page provides a broader reading list:
-// http://www.dcs.gla.ac.uk/~jsinger/ssa.html.
-//
-// For an introduction to SSI form, see The static single information
-// form by C. Scott Ananian.
-//
-// The level of abstraction of the IR form is intentionally close to
-// the source language to facilitate construction of source analysis
-// tools. It is not intended for machine code generation.
-//
-// The simplest way to create the IR of a package is
-// to load typed syntax trees using golang.org/x/tools/go/packages, then
-// invoke the irutil.Packages helper function. See ExampleLoadPackages
-// and ExampleWholeProgram for examples.
-// The resulting ir.Program contains all the packages and their
-// members, but IR code is not created for function bodies until a
-// subsequent call to (*Package).Build or (*Program).Build.
-//
-// The builder initially builds a naive IR form in which all local
-// variables are addresses of stack locations with explicit loads and
-// stores. Registerisation of eligible locals and φ-node insertion
-// using dominance and dataflow are then performed as a second pass
-// called "lifting" to improve the accuracy and performance of
-// subsequent analyses; this pass can be skipped by setting the
-// NaiveForm builder flag.
-//
-// The primary interfaces of this package are:
-//
-// - Member: a named member of a Go package.
-// - Value: an expression that yields a value.
-// - Instruction: a statement that consumes values and performs computation.
-// - Node: a Value or Instruction (emphasizing its membership in the IR value graph)
-//
-// A computation that yields a result implements both the Value and
-// Instruction interfaces. The following table shows for each
-// concrete type which of these interfaces it implements.
-//
-// Value? Instruction? Member?
-// *Alloc ✔ ✔
-// *BinOp ✔ ✔
-// *BlankStore ✔
-// *Builtin ✔
-// *Call ✔ ✔
-// *ChangeInterface ✔ ✔
-// *ChangeType ✔ ✔
-// *Const ✔ ✔
-// *Convert ✔ ✔
-// *DebugRef ✔
-// *Defer ✔ ✔
-// *Extract ✔ ✔
-// *Field ✔ ✔
-// *FieldAddr ✔ ✔
-// *FreeVar ✔
-// *Function ✔ ✔ (func)
-// *Global ✔ ✔ (var)
-// *Go ✔ ✔
-// *If ✔
-// *Index ✔ ✔
-// *IndexAddr ✔ ✔
-// *Jump ✔
-// *Load ✔ ✔
-// *MakeChan ✔ ✔
-// *MakeClosure ✔ ✔
-// *MakeInterface ✔ ✔
-// *MakeMap ✔ ✔
-// *MakeSlice ✔ ✔
-// *MapLookup ✔ ✔
-// *MapUpdate ✔ ✔
-// *NamedConst ✔ (const)
-// *Next ✔ ✔
-// *Panic ✔
-// *Parameter ✔ ✔
-// *Phi ✔ ✔
-// *Range ✔ ✔
-// *Recv ✔ ✔
-// *Return ✔
-// *RunDefers ✔
-// *Select ✔ ✔
-// *Send ✔ ✔
-// *Sigma ✔ ✔
-// *Slice ✔ ✔
-// *Store ✔ ✔
-// *StringLookup ✔ ✔
-// *Type ✔ (type)
-// *TypeAssert ✔ ✔
-// *UnOp ✔ ✔
-// *Unreachable ✔
-//
-// Other key types in this package include: Program, Package, Function
-// and BasicBlock.
-//
-// The program representation constructed by this package is fully
-// resolved internally, i.e. it does not rely on the names of Values,
-// Packages, Functions, Types or BasicBlocks for the correct
-// interpretation of the program. Only the identities of objects and
-// the topology of the IR and type graphs are semantically
-// significant. (There is one exception: Ids, used to identify field
-// and method names, contain strings.) Avoidance of name-based
-// operations simplifies the implementation of subsequent passes and
-// can make them very efficient. Many objects are nonetheless named
-// to aid in debugging, but it is not essential that the names be
-// either accurate or unambiguous. The public API exposes a number of
-// name-based maps for client convenience.
-//
-// The ir/irutil package provides various utilities that depend only
-// on the public API of this package.
-//
-// TODO(adonovan): Consider the exceptional control-flow implications
-// of defer and recover().
-//
-// TODO(adonovan): write a how-to document for all the various cases
-// of trying to determine corresponding elements across the four
-// domains of source locations, ast.Nodes, types.Objects,
-// ir.Values/Instructions.
-//
-package ir // import "honnef.co/go/tools/ir"
diff --git a/vendor/honnef.co/go/tools/ir/dom.go b/vendor/honnef.co/go/tools/ir/dom.go
deleted file mode 100644
index 08c147df9b9..00000000000
--- a/vendor/honnef.co/go/tools/ir/dom.go
+++ /dev/null
@@ -1,461 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file defines algorithms related to dominance.
-
-// Dominator tree construction ----------------------------------------
-//
-// We use the algorithm described in Lengauer & Tarjan. 1979. A fast
-// algorithm for finding dominators in a flowgraph.
-// http://doi.acm.org/10.1145/357062.357071
-//
-// We also apply the optimizations to SLT described in Georgiadis et
-// al, Finding Dominators in Practice, JGAA 2006,
-// http://jgaa.info/accepted/2006/GeorgiadisTarjanWerneck2006.10.1.pdf
-// to avoid the need for buckets of size > 1.
-
-import (
- "bytes"
- "fmt"
- "io"
- "math/big"
- "os"
- "sort"
-)
-
-// Idom returns the block that immediately dominates b:
-// its parent in the dominator tree, if any.
-// The entry node (b.Index==0) does not have a parent.
-//
-func (b *BasicBlock) Idom() *BasicBlock { return b.dom.idom }
-
-// Dominees returns the list of blocks that b immediately dominates:
-// its children in the dominator tree.
-//
-func (b *BasicBlock) Dominees() []*BasicBlock { return b.dom.children }
-
-// Dominates reports whether b dominates c.
-func (b *BasicBlock) Dominates(c *BasicBlock) bool {
- return b.dom.pre <= c.dom.pre && c.dom.post <= b.dom.post
-}
-
-type byDomPreorder []*BasicBlock
-
-func (a byDomPreorder) Len() int { return len(a) }
-func (a byDomPreorder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byDomPreorder) Less(i, j int) bool { return a[i].dom.pre < a[j].dom.pre }
-
-// DomPreorder returns a new slice containing the blocks of f in
-// dominator tree preorder.
-//
-func (f *Function) DomPreorder() []*BasicBlock {
- n := len(f.Blocks)
- order := make(byDomPreorder, n)
- copy(order, f.Blocks)
- sort.Sort(order)
- return order
-}
-
-// domInfo contains a BasicBlock's dominance information.
-type domInfo struct {
- idom *BasicBlock // immediate dominator (parent in domtree)
- children []*BasicBlock // nodes immediately dominated by this one
- pre, post int32 // pre- and post-order numbering within domtree
-}
-
-// buildDomTree computes the dominator tree of f using the LT algorithm.
-// Precondition: all blocks are reachable (e.g. optimizeBlocks has been run).
-//
-func buildDomTree(fn *Function) {
- // The step numbers refer to the original LT paper; the
- // reordering is due to Georgiadis.
-
- // Clear any previous domInfo.
- for _, b := range fn.Blocks {
- b.dom = domInfo{}
- }
-
- idoms := make([]*BasicBlock, len(fn.Blocks))
-
- order := make([]*BasicBlock, 0, len(fn.Blocks))
- seen := fn.blockset(0)
- var dfs func(b *BasicBlock)
- dfs = func(b *BasicBlock) {
- if !seen.Add(b) {
- return
- }
- for _, succ := range b.Succs {
- dfs(succ)
- }
- if fn.fakeExits.Has(b) {
- dfs(fn.Exit)
- }
- order = append(order, b)
- b.post = len(order) - 1
- }
- dfs(fn.Blocks[0])
-
- for i := 0; i < len(order)/2; i++ {
- o := len(order) - i - 1
- order[i], order[o] = order[o], order[i]
- }
-
- idoms[fn.Blocks[0].Index] = fn.Blocks[0]
- changed := true
- for changed {
- changed = false
- // iterate over all nodes in reverse postorder, except for the
- // entry node
- for _, b := range order[1:] {
- var newIdom *BasicBlock
- do := func(p *BasicBlock) {
- if idoms[p.Index] == nil {
- return
- }
- if newIdom == nil {
- newIdom = p
- } else {
- finger1 := p
- finger2 := newIdom
- for finger1 != finger2 {
- for finger1.post < finger2.post {
- finger1 = idoms[finger1.Index]
- }
- for finger2.post < finger1.post {
- finger2 = idoms[finger2.Index]
- }
- }
- newIdom = finger1
- }
- }
- for _, p := range b.Preds {
- do(p)
- }
- if b == fn.Exit {
- for _, p := range fn.Blocks {
- if fn.fakeExits.Has(p) {
- do(p)
- }
- }
- }
-
- if idoms[b.Index] != newIdom {
- idoms[b.Index] = newIdom
- changed = true
- }
- }
- }
-
- for i, b := range idoms {
- fn.Blocks[i].dom.idom = b
- if b == nil {
- // malformed CFG
- continue
- }
- if i == b.Index {
- continue
- }
- b.dom.children = append(b.dom.children, fn.Blocks[i])
- }
-
- numberDomTree(fn.Blocks[0], 0, 0)
-
- // printDomTreeDot(os.Stderr, fn) // debugging
- // printDomTreeText(os.Stderr, root, 0) // debugging
-
- if fn.Prog.mode&SanityCheckFunctions != 0 {
- sanityCheckDomTree(fn)
- }
-}
-
-// buildPostDomTree is like buildDomTree, but builds the post-dominator tree instead.
-func buildPostDomTree(fn *Function) {
- // The step numbers refer to the original LT paper; the
- // reordering is due to Georgiadis.
-
- // Clear any previous domInfo.
- for _, b := range fn.Blocks {
- b.pdom = domInfo{}
- }
-
- idoms := make([]*BasicBlock, len(fn.Blocks))
-
- order := make([]*BasicBlock, 0, len(fn.Blocks))
- seen := fn.blockset(0)
- var dfs func(b *BasicBlock)
- dfs = func(b *BasicBlock) {
- if !seen.Add(b) {
- return
- }
- for _, pred := range b.Preds {
- dfs(pred)
- }
- if b == fn.Exit {
- for _, p := range fn.Blocks {
- if fn.fakeExits.Has(p) {
- dfs(p)
- }
- }
- }
- order = append(order, b)
- b.post = len(order) - 1
- }
- dfs(fn.Exit)
-
- for i := 0; i < len(order)/2; i++ {
- o := len(order) - i - 1
- order[i], order[o] = order[o], order[i]
- }
-
- idoms[fn.Exit.Index] = fn.Exit
- changed := true
- for changed {
- changed = false
- // iterate over all nodes in reverse postorder, except for the
- // exit node
- for _, b := range order[1:] {
- var newIdom *BasicBlock
- do := func(p *BasicBlock) {
- if idoms[p.Index] == nil {
- return
- }
- if newIdom == nil {
- newIdom = p
- } else {
- finger1 := p
- finger2 := newIdom
- for finger1 != finger2 {
- for finger1.post < finger2.post {
- finger1 = idoms[finger1.Index]
- }
- for finger2.post < finger1.post {
- finger2 = idoms[finger2.Index]
- }
- }
- newIdom = finger1
- }
- }
- for _, p := range b.Succs {
- do(p)
- }
- if fn.fakeExits.Has(b) {
- do(fn.Exit)
- }
-
- if idoms[b.Index] != newIdom {
- idoms[b.Index] = newIdom
- changed = true
- }
- }
- }
-
- for i, b := range idoms {
- fn.Blocks[i].pdom.idom = b
- if b == nil {
- // malformed CFG
- continue
- }
- if i == b.Index {
- continue
- }
- b.pdom.children = append(b.pdom.children, fn.Blocks[i])
- }
-
- numberPostDomTree(fn.Exit, 0, 0)
-
- // printPostDomTreeDot(os.Stderr, fn) // debugging
- // printPostDomTreeText(os.Stderr, fn.Exit, 0) // debugging
-
- if fn.Prog.mode&SanityCheckFunctions != 0 { // XXX
- sanityCheckDomTree(fn) // XXX
- }
-}
-
-// numberDomTree sets the pre- and post-order numbers of a depth-first
-// traversal of the dominator tree rooted at v. These are used to
-// answer dominance queries in constant time.
-//
-func numberDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
- v.dom.pre = pre
- pre++
- for _, child := range v.dom.children {
- pre, post = numberDomTree(child, pre, post)
- }
- v.dom.post = post
- post++
- return pre, post
-}
-
-// numberPostDomTree sets the pre- and post-order numbers of a depth-first
-// traversal of the post-dominator tree rooted at v. These are used to
-// answer post-dominance queries in constant time.
-//
-func numberPostDomTree(v *BasicBlock, pre, post int32) (int32, int32) {
- v.pdom.pre = pre
- pre++
- for _, child := range v.pdom.children {
- pre, post = numberPostDomTree(child, pre, post)
- }
- v.pdom.post = post
- post++
- return pre, post
-}
-
-// Testing utilities ----------------------------------------
-
-// sanityCheckDomTree checks the correctness of the dominator tree
-// computed by the LT algorithm by comparing against the dominance
-// relation computed by a naive Kildall-style forward dataflow
-// analysis (Algorithm 10.16 from the "Dragon" book).
-//
-func sanityCheckDomTree(f *Function) {
- n := len(f.Blocks)
-
- // D[i] is the set of blocks that dominate f.Blocks[i],
- // represented as a bit-set of block indices.
- D := make([]big.Int, n)
-
- one := big.NewInt(1)
-
- // all is the set of all blocks; constant.
- var all big.Int
- all.Set(one).Lsh(&all, uint(n)).Sub(&all, one)
-
- // Initialization.
- for i := range f.Blocks {
- if i == 0 {
- // A root is dominated only by itself.
- D[i].SetBit(&D[0], 0, 1)
- } else {
- // All other blocks are (initially) dominated
- // by every block.
- D[i].Set(&all)
- }
- }
-
- // Iteration until fixed point.
- for changed := true; changed; {
- changed = false
- for i, b := range f.Blocks {
- if i == 0 {
- continue
- }
- // Compute intersection across predecessors.
- var x big.Int
- x.Set(&all)
- for _, pred := range b.Preds {
- x.And(&x, &D[pred.Index])
- }
- if b == f.Exit {
- for _, p := range f.Blocks {
- if f.fakeExits.Has(p) {
- x.And(&x, &D[p.Index])
- }
- }
- }
- x.SetBit(&x, i, 1) // a block always dominates itself.
- if D[i].Cmp(&x) != 0 {
- D[i].Set(&x)
- changed = true
- }
- }
- }
-
- // Check the entire relation. O(n^2).
- ok := true
- for i := 0; i < n; i++ {
- for j := 0; j < n; j++ {
- b, c := f.Blocks[i], f.Blocks[j]
- actual := b.Dominates(c)
- expected := D[j].Bit(i) == 1
- if actual != expected {
- fmt.Fprintf(os.Stderr, "dominates(%s, %s)==%t, want %t\n", b, c, actual, expected)
- ok = false
- }
- }
- }
-
- preorder := f.DomPreorder()
- for _, b := range f.Blocks {
- if got := preorder[b.dom.pre]; got != b {
- fmt.Fprintf(os.Stderr, "preorder[%d]==%s, want %s\n", b.dom.pre, got, b)
- ok = false
- }
- }
-
- if !ok {
- panic("sanityCheckDomTree failed for " + f.String())
- }
-
-}
-
-// Printing functions ----------------------------------------
-
-// printDomTree prints the dominator tree as text, using indentation.
-//lint:ignore U1000 used during debugging
-func printDomTreeText(buf *bytes.Buffer, v *BasicBlock, indent int) {
- fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
- for _, child := range v.dom.children {
- printDomTreeText(buf, child, indent+1)
- }
-}
-
-// printDomTreeDot prints the dominator tree of f in AT&T GraphViz
-// (.dot) format.
-//lint:ignore U1000 used during debugging
-func printDomTreeDot(buf io.Writer, f *Function) {
- fmt.Fprintln(buf, "//", f)
- fmt.Fprintln(buf, "digraph domtree {")
- for i, b := range f.Blocks {
- v := b.dom
- fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post)
- // TODO(adonovan): improve appearance of edges
- // belonging to both dominator tree and CFG.
-
- // Dominator tree edge.
- if i != 0 {
- fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.dom.pre, v.pre)
- }
- // CFG edges.
- for _, pred := range b.Preds {
- fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.dom.pre, v.pre)
- }
- }
- fmt.Fprintln(buf, "}")
-}
-
-// printDomTree prints the dominator tree as text, using indentation.
-//lint:ignore U1000 used during debugging
-func printPostDomTreeText(buf io.Writer, v *BasicBlock, indent int) {
- fmt.Fprintf(buf, "%*s%s\n", 4*indent, "", v)
- for _, child := range v.pdom.children {
- printPostDomTreeText(buf, child, indent+1)
- }
-}
-
-// printDomTreeDot prints the dominator tree of f in AT&T GraphViz
-// (.dot) format.
-//lint:ignore U1000 used during debugging
-func printPostDomTreeDot(buf io.Writer, f *Function) {
- fmt.Fprintln(buf, "//", f)
- fmt.Fprintln(buf, "digraph pdomtree {")
- for _, b := range f.Blocks {
- v := b.pdom
- fmt.Fprintf(buf, "\tn%d [label=\"%s (%d, %d)\",shape=\"rectangle\"];\n", v.pre, b, v.pre, v.post)
- // TODO(adonovan): improve appearance of edges
- // belonging to both dominator tree and CFG.
-
- // Dominator tree edge.
- if b != f.Exit {
- fmt.Fprintf(buf, "\tn%d -> n%d [style=\"solid\",weight=100];\n", v.idom.pdom.pre, v.pre)
- }
- // CFG edges.
- for _, pred := range b.Preds {
- fmt.Fprintf(buf, "\tn%d -> n%d [style=\"dotted\",weight=0];\n", pred.pdom.pre, v.pre)
- }
- }
- fmt.Fprintln(buf, "}")
-}
diff --git a/vendor/honnef.co/go/tools/ir/emit.go b/vendor/honnef.co/go/tools/ir/emit.go
deleted file mode 100644
index 5fa137af9ec..00000000000
--- a/vendor/honnef.co/go/tools/ir/emit.go
+++ /dev/null
@@ -1,450 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// Helpers for emitting IR instructions.
-
-import (
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
-)
-
-// emitNew emits to f a new (heap Alloc) instruction allocating an
-// object of type typ. pos is the optional source location.
-//
-func emitNew(f *Function, typ types.Type, source ast.Node) *Alloc {
- v := &Alloc{Heap: true}
- v.setType(types.NewPointer(typ))
- f.emit(v, source)
- return v
-}
-
-// emitLoad emits to f an instruction to load the address addr into a
-// new temporary, and returns the value so defined.
-//
-func emitLoad(f *Function, addr Value, source ast.Node) *Load {
- v := &Load{X: addr}
- v.setType(deref(addr.Type()))
- f.emit(v, source)
- return v
-}
-
-func emitRecv(f *Function, ch Value, commaOk bool, typ types.Type, source ast.Node) Value {
- recv := &Recv{
- Chan: ch,
- CommaOk: commaOk,
- }
- recv.setType(typ)
- return f.emit(recv, source)
-}
-
-// emitDebugRef emits to f a DebugRef pseudo-instruction associating
-// expression e with value v.
-//
-func emitDebugRef(f *Function, e ast.Expr, v Value, isAddr bool) {
- if !f.debugInfo() {
- return // debugging not enabled
- }
- if v == nil || e == nil {
- panic("nil")
- }
- var obj types.Object
- e = unparen(e)
- if id, ok := e.(*ast.Ident); ok {
- if isBlankIdent(id) {
- return
- }
- obj = f.Pkg.objectOf(id)
- switch obj.(type) {
- case *types.Nil, *types.Const, *types.Builtin:
- return
- }
- }
- f.emit(&DebugRef{
- X: v,
- Expr: e,
- IsAddr: isAddr,
- object: obj,
- }, nil)
-}
-
-// emitArith emits to f code to compute the binary operation op(x, y)
-// where op is an eager shift, logical or arithmetic operation.
-// (Use emitCompare() for comparisons and Builder.logicalBinop() for
-// non-eager operations.)
-//
-func emitArith(f *Function, op token.Token, x, y Value, t types.Type, source ast.Node) Value {
- switch op {
- case token.SHL, token.SHR:
- x = emitConv(f, x, t, source)
- // y may be signed or an 'untyped' constant.
- // TODO(adonovan): whence signed values?
- if b, ok := y.Type().Underlying().(*types.Basic); ok && b.Info()&types.IsUnsigned == 0 {
- y = emitConv(f, y, types.Typ[types.Uint64], source)
- }
-
- case token.ADD, token.SUB, token.MUL, token.QUO, token.REM, token.AND, token.OR, token.XOR, token.AND_NOT:
- x = emitConv(f, x, t, source)
- y = emitConv(f, y, t, source)
-
- default:
- panic("illegal op in emitArith: " + op.String())
-
- }
- v := &BinOp{
- Op: op,
- X: x,
- Y: y,
- }
- v.setType(t)
- return f.emit(v, source)
-}
-
-// emitCompare emits to f code compute the boolean result of
-// comparison comparison 'x op y'.
-//
-func emitCompare(f *Function, op token.Token, x, y Value, source ast.Node) Value {
- xt := x.Type().Underlying()
- yt := y.Type().Underlying()
-
- // Special case to optimise a tagless SwitchStmt so that
- // these are equivalent
- // switch { case e: ...}
- // switch true { case e: ... }
- // if e==true { ... }
- // even in the case when e's type is an interface.
- // TODO(adonovan): opt: generalise to x==true, false!=y, etc.
- if x, ok := x.(*Const); ok && op == token.EQL && x.Value != nil && x.Value.Kind() == constant.Bool && constant.BoolVal(x.Value) {
- if yt, ok := yt.(*types.Basic); ok && yt.Info()&types.IsBoolean != 0 {
- return y
- }
- }
-
- if types.Identical(xt, yt) {
- // no conversion necessary
- } else if _, ok := xt.(*types.Interface); ok {
- y = emitConv(f, y, x.Type(), source)
- } else if _, ok := yt.(*types.Interface); ok {
- x = emitConv(f, x, y.Type(), source)
- } else if _, ok := x.(*Const); ok {
- x = emitConv(f, x, y.Type(), source)
- } else if _, ok := y.(*Const); ok {
- y = emitConv(f, y, x.Type(), source)
- //lint:ignore SA9003 no-op
- } else {
- // other cases, e.g. channels. No-op.
- }
-
- v := &BinOp{
- Op: op,
- X: x,
- Y: y,
- }
- v.setType(tBool)
- return f.emit(v, source)
-}
-
-// isValuePreserving returns true if a conversion from ut_src to
-// ut_dst is value-preserving, i.e. just a change of type.
-// Precondition: neither argument is a named type.
-//
-func isValuePreserving(ut_src, ut_dst types.Type) bool {
- // Identical underlying types?
- if structTypesIdentical(ut_dst, ut_src) {
- return true
- }
-
- switch ut_dst.(type) {
- case *types.Chan:
- // Conversion between channel types?
- _, ok := ut_src.(*types.Chan)
- return ok
-
- case *types.Pointer:
- // Conversion between pointers with identical base types?
- _, ok := ut_src.(*types.Pointer)
- return ok
- }
- return false
-}
-
-// emitConv emits to f code to convert Value val to exactly type typ,
-// and returns the converted value. Implicit conversions are required
-// by language assignability rules in assignments, parameter passing,
-// etc. Conversions cannot fail dynamically.
-//
-func emitConv(f *Function, val Value, typ types.Type, source ast.Node) Value {
- t_src := val.Type()
-
- // Identical types? Conversion is a no-op.
- if types.Identical(t_src, typ) {
- return val
- }
-
- ut_dst := typ.Underlying()
- ut_src := t_src.Underlying()
-
- // Just a change of type, but not value or representation?
- if isValuePreserving(ut_src, ut_dst) {
- c := &ChangeType{X: val}
- c.setType(typ)
- return f.emit(c, source)
- }
-
- // Conversion to, or construction of a value of, an interface type?
- if _, ok := ut_dst.(*types.Interface); ok {
- // Assignment from one interface type to another?
- if _, ok := ut_src.(*types.Interface); ok {
- c := &ChangeInterface{X: val}
- c.setType(typ)
- return f.emit(c, source)
- }
-
- // Untyped nil constant? Return interface-typed nil constant.
- if ut_src == tUntypedNil {
- return emitConst(f, nilConst(typ))
- }
-
- // Convert (non-nil) "untyped" literals to their default type.
- if t, ok := ut_src.(*types.Basic); ok && t.Info()&types.IsUntyped != 0 {
- val = emitConv(f, val, types.Default(ut_src), source)
- }
-
- f.Pkg.Prog.needMethodsOf(val.Type())
- mi := &MakeInterface{X: val}
- mi.setType(typ)
- return f.emit(mi, source)
- }
-
- // Conversion of a compile-time constant value?
- if c, ok := val.(*Const); ok {
- if _, ok := ut_dst.(*types.Basic); ok || c.IsNil() {
- // Conversion of a compile-time constant to
- // another constant type results in a new
- // constant of the destination type and
- // (initially) the same abstract value.
- // We don't truncate the value yet.
- return emitConst(f, NewConst(c.Value, typ))
- }
-
- // We're converting from constant to non-constant type,
- // e.g. string -> []byte/[]rune.
- }
-
- // A representation-changing conversion?
- // At least one of {ut_src,ut_dst} must be *Basic.
- // (The other may be []byte or []rune.)
- _, ok1 := ut_src.(*types.Basic)
- _, ok2 := ut_dst.(*types.Basic)
- if ok1 || ok2 {
- c := &Convert{X: val}
- c.setType(typ)
- return f.emit(c, source)
- }
-
- panic(fmt.Sprintf("in %s: cannot convert %s (%s) to %s", f, val, val.Type(), typ))
-}
-
-// emitStore emits to f an instruction to store value val at location
-// addr, applying implicit conversions as required by assignability rules.
-//
-func emitStore(f *Function, addr, val Value, source ast.Node) *Store {
- s := &Store{
- Addr: addr,
- Val: emitConv(f, val, deref(addr.Type()), source),
- }
- // make sure we call getMem after the call to emitConv, which may
- // itself update the memory state
- f.emit(s, source)
- return s
-}
-
-// emitJump emits to f a jump to target, and updates the control-flow graph.
-// Postcondition: f.currentBlock is nil.
-//
-func emitJump(f *Function, target *BasicBlock, source ast.Node) *Jump {
- b := f.currentBlock
- j := new(Jump)
- b.emit(j, source)
- addEdge(b, target)
- f.currentBlock = nil
- return j
-}
-
-// emitIf emits to f a conditional jump to tblock or fblock based on
-// cond, and updates the control-flow graph.
-// Postcondition: f.currentBlock is nil.
-//
-func emitIf(f *Function, cond Value, tblock, fblock *BasicBlock, source ast.Node) *If {
- b := f.currentBlock
- stmt := &If{Cond: cond}
- b.emit(stmt, source)
- addEdge(b, tblock)
- addEdge(b, fblock)
- f.currentBlock = nil
- return stmt
-}
-
-// emitExtract emits to f an instruction to extract the index'th
-// component of tuple. It returns the extracted value.
-//
-func emitExtract(f *Function, tuple Value, index int, source ast.Node) Value {
- e := &Extract{Tuple: tuple, Index: index}
- e.setType(tuple.Type().(*types.Tuple).At(index).Type())
- return f.emit(e, source)
-}
-
-// emitTypeAssert emits to f a type assertion value := x.(t) and
-// returns the value. x.Type() must be an interface.
-//
-func emitTypeAssert(f *Function, x Value, t types.Type, source ast.Node) Value {
- a := &TypeAssert{X: x, AssertedType: t}
- a.setType(t)
- return f.emit(a, source)
-}
-
-// emitTypeTest emits to f a type test value,ok := x.(t) and returns
-// a (value, ok) tuple. x.Type() must be an interface.
-//
-func emitTypeTest(f *Function, x Value, t types.Type, source ast.Node) Value {
- a := &TypeAssert{
- X: x,
- AssertedType: t,
- CommaOk: true,
- }
- a.setType(types.NewTuple(
- newVar("value", t),
- varOk,
- ))
- return f.emit(a, source)
-}
-
-// emitTailCall emits to f a function call in tail position. The
-// caller is responsible for all fields of 'call' except its type.
-// Intended for wrapper methods.
-// Precondition: f does/will not use deferred procedure calls.
-// Postcondition: f.currentBlock is nil.
-//
-func emitTailCall(f *Function, call *Call, source ast.Node) {
- tresults := f.Signature.Results()
- nr := tresults.Len()
- if nr == 1 {
- call.typ = tresults.At(0).Type()
- } else {
- call.typ = tresults
- }
- tuple := f.emit(call, source)
- var ret Return
- switch nr {
- case 0:
- // no-op
- case 1:
- ret.Results = []Value{tuple}
- default:
- for i := 0; i < nr; i++ {
- v := emitExtract(f, tuple, i, source)
- // TODO(adonovan): in principle, this is required:
- // v = emitConv(f, o.Type, f.Signature.Results[i].Type)
- // but in practice emitTailCall is only used when
- // the types exactly match.
- ret.Results = append(ret.Results, v)
- }
- }
-
- f.Exit = f.newBasicBlock("exit")
- emitJump(f, f.Exit, source)
- f.currentBlock = f.Exit
- f.emit(&ret, source)
- f.currentBlock = nil
-}
-
-// emitImplicitSelections emits to f code to apply the sequence of
-// implicit field selections specified by indices to base value v, and
-// returns the selected value.
-//
-// If v is the address of a struct, the result will be the address of
-// a field; if it is the value of a struct, the result will be the
-// value of a field.
-//
-func emitImplicitSelections(f *Function, v Value, indices []int, source ast.Node) Value {
- for _, index := range indices {
- fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
-
- if isPointer(v.Type()) {
- instr := &FieldAddr{
- X: v,
- Field: index,
- }
- instr.setType(types.NewPointer(fld.Type()))
- v = f.emit(instr, source)
- // Load the field's value iff indirectly embedded.
- if isPointer(fld.Type()) {
- v = emitLoad(f, v, source)
- }
- } else {
- instr := &Field{
- X: v,
- Field: index,
- }
- instr.setType(fld.Type())
- v = f.emit(instr, source)
- }
- }
- return v
-}
-
-// emitFieldSelection emits to f code to select the index'th field of v.
-//
-// If wantAddr, the input must be a pointer-to-struct and the result
-// will be the field's address; otherwise the result will be the
-// field's value.
-// Ident id is used for position and debug info.
-//
-func emitFieldSelection(f *Function, v Value, index int, wantAddr bool, id *ast.Ident) Value {
- fld := deref(v.Type()).Underlying().(*types.Struct).Field(index)
- if isPointer(v.Type()) {
- instr := &FieldAddr{
- X: v,
- Field: index,
- }
- instr.setSource(id)
- instr.setType(types.NewPointer(fld.Type()))
- v = f.emit(instr, id)
- // Load the field's value iff we don't want its address.
- if !wantAddr {
- v = emitLoad(f, v, id)
- }
- } else {
- instr := &Field{
- X: v,
- Field: index,
- }
- instr.setSource(id)
- instr.setType(fld.Type())
- v = f.emit(instr, id)
- }
- emitDebugRef(f, id, v, wantAddr)
- return v
-}
-
-// zeroValue emits to f code to produce a zero value of type t,
-// and returns it.
-//
-func zeroValue(f *Function, t types.Type, source ast.Node) Value {
- switch t.Underlying().(type) {
- case *types.Struct, *types.Array:
- return emitLoad(f, f.addLocal(t, source), source)
- default:
- return emitConst(f, zeroConst(t))
- }
-}
-
-func emitConst(f *Function, c *Const) *Const {
- f.consts = append(f.consts, c)
- return c
-}
diff --git a/vendor/honnef.co/go/tools/ir/exits.go b/vendor/honnef.co/go/tools/ir/exits.go
deleted file mode 100644
index 10cda7bb66e..00000000000
--- a/vendor/honnef.co/go/tools/ir/exits.go
+++ /dev/null
@@ -1,271 +0,0 @@
-package ir
-
-import (
- "go/types"
-)
-
-func (b *builder) buildExits(fn *Function) {
- if obj := fn.Object(); obj != nil {
- switch obj.Pkg().Path() {
- case "runtime":
- switch obj.Name() {
- case "exit":
- fn.WillExit = true
- return
- case "throw":
- fn.WillExit = true
- return
- case "Goexit":
- fn.WillUnwind = true
- return
- }
- case "github.com/sirupsen/logrus":
- switch obj.(*types.Func).FullName() {
- case "(*github.com/sirupsen/logrus.Logger).Exit":
- // Technically, this method does not unconditionally exit
- // the process. It dynamically calls a function stored in
- // the logger. If the function is nil, it defaults to
- // os.Exit.
- //
- // The main intent of this method is to terminate the
- // process, and that's what the vast majority of people
- // will use it for. We'll happily accept some false
- // negatives to avoid a lot of false positives.
- fn.WillExit = true
- return
- case "(*github.com/sirupsen/logrus.Logger).Panic",
- "(*github.com/sirupsen/logrus.Logger).Panicf",
- "(*github.com/sirupsen/logrus.Logger).Panicln":
-
- // These methods will always panic, but that's not
- // statically known from the code alone, because they
- // take a detour through the generic Log methods.
- fn.WillUnwind = true
- return
- case "(*github.com/sirupsen/logrus.Entry).Panicf",
- "(*github.com/sirupsen/logrus.Entry).Panicln":
-
- // Entry.Panic has an explicit panic, but Panicf and
- // Panicln do not, relying fully on the generic Log
- // method.
- fn.WillUnwind = true
- return
- case "(*github.com/sirupsen/logrus.Logger).Log",
- "(*github.com/sirupsen/logrus.Logger).Logf",
- "(*github.com/sirupsen/logrus.Logger).Logln":
- // TODO(dh): we cannot handle these case. Whether they
- // exit or unwind depends on the level, which is set
- // via the first argument. We don't currently support
- // call-site-specific exit information.
- }
- }
- }
-
- buildDomTree(fn)
-
- isRecoverCall := func(instr Instruction) bool {
- if instr, ok := instr.(*Call); ok {
- if builtin, ok := instr.Call.Value.(*Builtin); ok {
- if builtin.Name() == "recover" {
- return true
- }
- }
- }
- return false
- }
-
- // All panics branch to the exit block, which means that if every
- // possible path through the function panics, then all
- // predecessors of the exit block must panic.
- willPanic := true
- for _, pred := range fn.Exit.Preds {
- if _, ok := pred.Control().(*Panic); !ok {
- willPanic = false
- }
- }
- if willPanic {
- recovers := false
- recoverLoop:
- for _, u := range fn.Blocks {
- for _, instr := range u.Instrs {
- if instr, ok := instr.(*Defer); ok {
- call := instr.Call.StaticCallee()
- if call == nil {
- // not a static call, so we can't be sure the
- // deferred call isn't calling recover
- recovers = true
- break recoverLoop
- }
- if len(call.Blocks) == 0 {
- // external function, we don't know what's
- // happening inside it
- //
- // TODO(dh): this includes functions from
- // imported packages, due to how go/analysis
- // works. We could introduce another fact,
- // like we've done for exiting and unwinding,
- // but it doesn't seem worth it. Virtually all
- // uses of recover will be in closures.
- recovers = true
- break recoverLoop
- }
- for _, y := range call.Blocks {
- for _, instr2 := range y.Instrs {
- if isRecoverCall(instr2) {
- recovers = true
- break recoverLoop
- }
- }
- }
- }
- }
- }
- if !recovers {
- fn.WillUnwind = true
- return
- }
- }
-
- // TODO(dh): don't check that any specific call dominates the exit
- // block. instead, check that all calls combined cover every
- // possible path through the function.
- exits := NewBlockSet(len(fn.Blocks))
- unwinds := NewBlockSet(len(fn.Blocks))
- for _, u := range fn.Blocks {
- for _, instr := range u.Instrs {
- if instr, ok := instr.(CallInstruction); ok {
- switch instr.(type) {
- case *Defer, *Call:
- default:
- continue
- }
- if instr.Common().IsInvoke() {
- // give up
- return
- }
- var call *Function
- switch instr.Common().Value.(type) {
- case *Function, *MakeClosure:
- call = instr.Common().StaticCallee()
- case *Builtin:
- // the only builtins that affect control flow are
- // panic and recover, and we've already handled
- // those
- continue
- default:
- // dynamic dispatch
- return
- }
- // buildFunction is idempotent. if we're part of a
- // (mutually) recursive call chain, then buildFunction
- // will immediately return, and fn.WillExit will be false.
- if call.Package() == fn.Package() {
- b.buildFunction(call)
- }
- dom := u.Dominates(fn.Exit)
- if call.WillExit {
- if dom {
- fn.WillExit = true
- return
- }
- exits.Add(u)
- } else if call.WillUnwind {
- if dom {
- fn.WillUnwind = true
- return
- }
- unwinds.Add(u)
- }
- }
- }
- }
-
- // depth-first search trying to find a path to the exit block that
- // doesn't cross any of the blacklisted blocks
- seen := NewBlockSet(len(fn.Blocks))
- var findPath func(root *BasicBlock, bl *BlockSet) bool
- findPath = func(root *BasicBlock, bl *BlockSet) bool {
- if root == fn.Exit {
- return true
- }
- if seen.Has(root) {
- return false
- }
- if bl.Has(root) {
- return false
- }
- seen.Add(root)
- for _, succ := range root.Succs {
- if findPath(succ, bl) {
- return true
- }
- }
- return false
- }
-
- if exits.Num() > 0 {
- if !findPath(fn.Blocks[0], exits) {
- fn.WillExit = true
- return
- }
- }
- if unwinds.Num() > 0 {
- seen.Clear()
- if !findPath(fn.Blocks[0], unwinds) {
- fn.WillUnwind = true
- return
- }
- }
-}
-
-func (b *builder) addUnreachables(fn *Function) {
- for _, bb := range fn.Blocks {
- for i, instr := range bb.Instrs {
- if instr, ok := instr.(*Call); ok {
- var call *Function
- switch v := instr.Common().Value.(type) {
- case *Function:
- call = v
- case *MakeClosure:
- call = v.Fn.(*Function)
- }
- if call == nil {
- continue
- }
- if call.Package() == fn.Package() {
- // make sure we have information on all functions in this package
- b.buildFunction(call)
- }
- if call.WillExit {
- // This call will cause the process to terminate.
- // Remove remaining instructions in the block and
- // replace any control flow with Unreachable.
- for _, succ := range bb.Succs {
- succ.removePred(bb)
- }
- bb.Succs = bb.Succs[:0]
-
- bb.Instrs = bb.Instrs[:i+1]
- bb.emit(new(Unreachable), instr.Source())
- addEdge(bb, fn.Exit)
- break
- } else if call.WillUnwind {
- // This call will cause the goroutine to terminate
- // and defers to run (i.e. a panic or
- // runtime.Goexit). Remove remaining instructions
- // in the block and replace any control flow with
- // an unconditional jump to the exit block.
- for _, succ := range bb.Succs {
- succ.removePred(bb)
- }
- bb.Succs = bb.Succs[:0]
-
- bb.Instrs = bb.Instrs[:i+1]
- bb.emit(new(Jump), instr.Source())
- addEdge(bb, fn.Exit)
- break
- }
- }
- }
- }
-}
diff --git a/vendor/honnef.co/go/tools/ir/func.go b/vendor/honnef.co/go/tools/ir/func.go
deleted file mode 100644
index 386d82b6708..00000000000
--- a/vendor/honnef.co/go/tools/ir/func.go
+++ /dev/null
@@ -1,961 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file implements the Function and BasicBlock types.
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/constant"
- "go/format"
- "go/token"
- "go/types"
- "io"
- "os"
- "strings"
-)
-
-// addEdge adds a control-flow graph edge from from to to.
-func addEdge(from, to *BasicBlock) {
- from.Succs = append(from.Succs, to)
- to.Preds = append(to.Preds, from)
-}
-
-// Control returns the last instruction in the block.
-func (b *BasicBlock) Control() Instruction {
- if len(b.Instrs) == 0 {
- return nil
- }
- return b.Instrs[len(b.Instrs)-1]
-}
-
-// SIgmaFor returns the sigma node for v coming from pred.
-func (b *BasicBlock) SigmaFor(v Value, pred *BasicBlock) *Sigma {
- for _, instr := range b.Instrs {
- sigma, ok := instr.(*Sigma)
- if !ok {
- // no more sigmas
- return nil
- }
- if sigma.From == pred && sigma.X == v {
- return sigma
- }
- }
- return nil
-}
-
-// Parent returns the function that contains block b.
-func (b *BasicBlock) Parent() *Function { return b.parent }
-
-// String returns a human-readable label of this block.
-// It is not guaranteed unique within the function.
-//
-func (b *BasicBlock) String() string {
- return fmt.Sprintf("%d", b.Index)
-}
-
-// emit appends an instruction to the current basic block.
-// If the instruction defines a Value, it is returned.
-//
-func (b *BasicBlock) emit(i Instruction, source ast.Node) Value {
- i.setSource(source)
- i.setBlock(b)
- b.Instrs = append(b.Instrs, i)
- v, _ := i.(Value)
- return v
-}
-
-// predIndex returns the i such that b.Preds[i] == c or panics if
-// there is none.
-func (b *BasicBlock) predIndex(c *BasicBlock) int {
- for i, pred := range b.Preds {
- if pred == c {
- return i
- }
- }
- panic(fmt.Sprintf("no edge %s -> %s", c, b))
-}
-
-// succIndex returns the i such that b.Succs[i] == c or -1 if there is none.
-func (b *BasicBlock) succIndex(c *BasicBlock) int {
- for i, succ := range b.Succs {
- if succ == c {
- return i
- }
- }
- return -1
-}
-
-// hasPhi returns true if b.Instrs contains φ-nodes.
-func (b *BasicBlock) hasPhi() bool {
- _, ok := b.Instrs[0].(*Phi)
- return ok
-}
-
-func (b *BasicBlock) Phis() []Instruction {
- return b.phis()
-}
-
-// phis returns the prefix of b.Instrs containing all the block's φ-nodes.
-func (b *BasicBlock) phis() []Instruction {
- for i, instr := range b.Instrs {
- if _, ok := instr.(*Phi); !ok {
- return b.Instrs[:i]
- }
- }
- return nil // unreachable in well-formed blocks
-}
-
-// replacePred replaces all occurrences of p in b's predecessor list with q.
-// Ordinarily there should be at most one.
-//
-func (b *BasicBlock) replacePred(p, q *BasicBlock) {
- for i, pred := range b.Preds {
- if pred == p {
- b.Preds[i] = q
- }
- }
-}
-
-// replaceSucc replaces all occurrences of p in b's successor list with q.
-// Ordinarily there should be at most one.
-//
-func (b *BasicBlock) replaceSucc(p, q *BasicBlock) {
- for i, succ := range b.Succs {
- if succ == p {
- b.Succs[i] = q
- }
- }
-}
-
-// removePred removes all occurrences of p in b's
-// predecessor list and φ-nodes.
-// Ordinarily there should be at most one.
-//
-func (b *BasicBlock) removePred(p *BasicBlock) {
- phis := b.phis()
-
- // We must preserve edge order for φ-nodes.
- j := 0
- for i, pred := range b.Preds {
- if pred != p {
- b.Preds[j] = b.Preds[i]
- // Strike out φ-edge too.
- for _, instr := range phis {
- phi := instr.(*Phi)
- phi.Edges[j] = phi.Edges[i]
- }
- j++
- }
- }
- // Nil out b.Preds[j:] and φ-edges[j:] to aid GC.
- for i := j; i < len(b.Preds); i++ {
- b.Preds[i] = nil
- for _, instr := range phis {
- instr.(*Phi).Edges[i] = nil
- }
- }
- b.Preds = b.Preds[:j]
- for _, instr := range phis {
- phi := instr.(*Phi)
- phi.Edges = phi.Edges[:j]
- }
-}
-
-// Destinations associated with unlabelled for/switch/select stmts.
-// We push/pop one of these as we enter/leave each construct and for
-// each BranchStmt we scan for the innermost target of the right type.
-//
-type targets struct {
- tail *targets // rest of stack
- _break *BasicBlock
- _continue *BasicBlock
- _fallthrough *BasicBlock
-}
-
-// Destinations associated with a labelled block.
-// We populate these as labels are encountered in forward gotos or
-// labelled statements.
-//
-type lblock struct {
- _goto *BasicBlock
- _break *BasicBlock
- _continue *BasicBlock
-}
-
-// labelledBlock returns the branch target associated with the
-// specified label, creating it if needed.
-//
-func (f *Function) labelledBlock(label *ast.Ident) *lblock {
- lb := f.lblocks[label.Obj]
- if lb == nil {
- lb = &lblock{_goto: f.newBasicBlock(label.Name)}
- if f.lblocks == nil {
- f.lblocks = make(map[*ast.Object]*lblock)
- }
- f.lblocks[label.Obj] = lb
- }
- return lb
-}
-
-// addParam adds a (non-escaping) parameter to f.Params of the
-// specified name, type and source position.
-//
-func (f *Function) addParam(name string, typ types.Type, source ast.Node) *Parameter {
- var b *BasicBlock
- if len(f.Blocks) > 0 {
- b = f.Blocks[0]
- }
- v := &Parameter{
- name: name,
- }
- v.setBlock(b)
- v.setType(typ)
- v.setSource(source)
- f.Params = append(f.Params, v)
- if b != nil {
- // There may be no blocks if this function has no body. We
- // still create params, but aren't interested in the
- // instruction.
- f.Blocks[0].Instrs = append(f.Blocks[0].Instrs, v)
- }
- return v
-}
-
-func (f *Function) addParamObj(obj types.Object, source ast.Node) *Parameter {
- name := obj.Name()
- if name == "" {
- name = fmt.Sprintf("arg%d", len(f.Params))
- }
- param := f.addParam(name, obj.Type(), source)
- param.object = obj
- return param
-}
-
-// addSpilledParam declares a parameter that is pre-spilled to the
-// stack; the function body will load/store the spilled location.
-// Subsequent lifting will eliminate spills where possible.
-//
-func (f *Function) addSpilledParam(obj types.Object, source ast.Node) {
- param := f.addParamObj(obj, source)
- spill := &Alloc{}
- spill.setType(types.NewPointer(obj.Type()))
- spill.source = source
- f.objects[obj] = spill
- f.Locals = append(f.Locals, spill)
- f.emit(spill, source)
- emitStore(f, spill, param, source)
- // f.emit(&Store{Addr: spill, Val: param})
-}
-
-// startBody initializes the function prior to generating IR code for its body.
-// Precondition: f.Type() already set.
-//
-func (f *Function) startBody() {
- entry := f.newBasicBlock("entry")
- f.currentBlock = entry
- f.objects = make(map[types.Object]Value) // needed for some synthetics, e.g. init
-}
-
-func (f *Function) blockset(i int) *BlockSet {
- bs := &f.blocksets[i]
- if len(bs.values) != len(f.Blocks) {
- if cap(bs.values) >= len(f.Blocks) {
- bs.values = bs.values[:len(f.Blocks)]
- bs.Clear()
- } else {
- bs.values = make([]bool, len(f.Blocks))
- }
- } else {
- bs.Clear()
- }
- return bs
-}
-
-func (f *Function) exitBlock() {
- old := f.currentBlock
-
- f.Exit = f.newBasicBlock("exit")
- f.currentBlock = f.Exit
-
- ret := f.results()
- results := make([]Value, len(ret))
- // Run function calls deferred in this
- // function when explicitly returning from it.
- f.emit(new(RunDefers), nil)
- for i, r := range ret {
- results[i] = emitLoad(f, r, nil)
- }
-
- f.emit(&Return{Results: results}, nil)
- f.currentBlock = old
-}
-
-// createSyntacticParams populates f.Params and generates code (spills
-// and named result locals) for all the parameters declared in the
-// syntax. In addition it populates the f.objects mapping.
-//
-// Preconditions:
-// f.startBody() was called.
-// Postcondition:
-// len(f.Params) == len(f.Signature.Params) + (f.Signature.Recv() ? 1 : 0)
-//
-func (f *Function) createSyntacticParams(recv *ast.FieldList, functype *ast.FuncType) {
- // Receiver (at most one inner iteration).
- if recv != nil {
- for _, field := range recv.List {
- for _, n := range field.Names {
- f.addSpilledParam(f.Pkg.info.Defs[n], n)
- }
- // Anonymous receiver? No need to spill.
- if field.Names == nil {
- f.addParamObj(f.Signature.Recv(), field)
- }
- }
- }
-
- // Parameters.
- if functype.Params != nil {
- n := len(f.Params) // 1 if has recv, 0 otherwise
- for _, field := range functype.Params.List {
- for _, n := range field.Names {
- f.addSpilledParam(f.Pkg.info.Defs[n], n)
- }
- // Anonymous parameter? No need to spill.
- if field.Names == nil {
- f.addParamObj(f.Signature.Params().At(len(f.Params)-n), field)
- }
- }
- }
-
- // Named results.
- if functype.Results != nil {
- for _, field := range functype.Results.List {
- // Implicit "var" decl of locals for named results.
- for _, n := range field.Names {
- f.namedResults = append(f.namedResults, f.addLocalForIdent(n))
- }
- }
-
- if len(f.namedResults) == 0 {
- sig := f.Signature.Results()
- for i := 0; i < sig.Len(); i++ {
- // XXX position information
- v := f.addLocal(sig.At(i).Type(), nil)
- f.implicitResults = append(f.implicitResults, v)
- }
- }
- }
-}
-
-func numberNodes(f *Function) {
- var base ID
- for _, b := range f.Blocks {
- for _, instr := range b.Instrs {
- if instr == nil {
- continue
- }
- base++
- instr.setID(base)
- }
- }
-}
-
-// buildReferrers populates the def/use information in all non-nil
-// Value.Referrers slice.
-// Precondition: all such slices are initially empty.
-func buildReferrers(f *Function) {
- var rands []*Value
- for _, b := range f.Blocks {
- for _, instr := range b.Instrs {
- rands = instr.Operands(rands[:0]) // recycle storage
- for _, rand := range rands {
- if r := *rand; r != nil {
- if ref := r.Referrers(); ref != nil {
- *ref = append(*ref, instr)
- }
- }
- }
- }
- }
-}
-
-func (f *Function) emitConsts() {
- if len(f.Blocks) == 0 {
- f.consts = nil
- return
- }
-
- // TODO(dh): our deduplication only works on booleans and
- // integers. other constants are represented as pointers to
- // things.
- if len(f.consts) == 0 {
- return
- } else if len(f.consts) <= 32 {
- f.emitConstsFew()
- } else {
- f.emitConstsMany()
- }
-}
-
-func (f *Function) emitConstsFew() {
- dedup := make([]*Const, 0, 32)
- for _, c := range f.consts {
- if len(*c.Referrers()) == 0 {
- continue
- }
- found := false
- for _, d := range dedup {
- if c.typ == d.typ && c.Value == d.Value {
- replaceAll(c, d)
- found = true
- break
- }
- }
- if !found {
- dedup = append(dedup, c)
- }
- }
-
- instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(dedup))
- for i, c := range dedup {
- instrs[i] = c
- c.setBlock(f.Blocks[0])
- }
- copy(instrs[len(dedup):], f.Blocks[0].Instrs)
- f.Blocks[0].Instrs = instrs
- f.consts = nil
-}
-
-func (f *Function) emitConstsMany() {
- type constKey struct {
- typ types.Type
- value constant.Value
- }
-
- m := make(map[constKey]Value, len(f.consts))
- areNil := 0
- for i, c := range f.consts {
- if len(*c.Referrers()) == 0 {
- f.consts[i] = nil
- areNil++
- continue
- }
-
- k := constKey{
- typ: c.typ,
- value: c.Value,
- }
- if dup, ok := m[k]; !ok {
- m[k] = c
- } else {
- f.consts[i] = nil
- areNil++
- replaceAll(c, dup)
- }
- }
-
- instrs := make([]Instruction, len(f.Blocks[0].Instrs)+len(f.consts)-areNil)
- i := 0
- for _, c := range f.consts {
- if c != nil {
- instrs[i] = c
- c.setBlock(f.Blocks[0])
- i++
- }
- }
- copy(instrs[i:], f.Blocks[0].Instrs)
- f.Blocks[0].Instrs = instrs
- f.consts = nil
-}
-
-// buildFakeExits ensures that every block in the function is
-// reachable in reverse from the Exit block. This is required to build
-// a full post-dominator tree, and to ensure the exit block's
-// inclusion in the dominator tree.
-func buildFakeExits(fn *Function) {
- // Find back-edges via forward DFS
- fn.fakeExits = BlockSet{values: make([]bool, len(fn.Blocks))}
- seen := fn.blockset(0)
- backEdges := fn.blockset(1)
-
- var dfs func(b *BasicBlock)
- dfs = func(b *BasicBlock) {
- if !seen.Add(b) {
- backEdges.Add(b)
- return
- }
- for _, pred := range b.Succs {
- dfs(pred)
- }
- }
- dfs(fn.Blocks[0])
-buildLoop:
- for {
- seen := fn.blockset(2)
- var dfs func(b *BasicBlock)
- dfs = func(b *BasicBlock) {
- if !seen.Add(b) {
- return
- }
- for _, pred := range b.Preds {
- dfs(pred)
- }
- if b == fn.Exit {
- for _, b := range fn.Blocks {
- if fn.fakeExits.Has(b) {
- dfs(b)
- }
- }
- }
- }
- dfs(fn.Exit)
-
- for _, b := range fn.Blocks {
- if !seen.Has(b) && backEdges.Has(b) {
- // Block b is not reachable from the exit block. Add a
- // fake jump from b to exit, then try again. Note that we
- // only add one fake edge at a time, as it may make
- // multiple blocks reachable.
- //
- // We only consider those blocks that have back edges.
- // Any unreachable block that doesn't have a back edge
- // must flow into a loop, which by definition has a
- // back edge. Thus, by looking for loops, we should
- // need fewer fake edges overall.
- fn.fakeExits.Add(b)
- continue buildLoop
- }
- }
-
- break
- }
-}
-
-// finishBody() finalizes the function after IR code generation of its body.
-func (f *Function) finishBody() {
- f.objects = nil
- f.currentBlock = nil
- f.lblocks = nil
-
- // Remove from f.Locals any Allocs that escape to the heap.
- j := 0
- for _, l := range f.Locals {
- if !l.Heap {
- f.Locals[j] = l
- j++
- }
- }
- // Nil out f.Locals[j:] to aid GC.
- for i := j; i < len(f.Locals); i++ {
- f.Locals[i] = nil
- }
- f.Locals = f.Locals[:j]
-
- optimizeBlocks(f)
- buildReferrers(f)
- buildDomTree(f)
- buildPostDomTree(f)
-
- if f.Prog.mode&NaiveForm == 0 {
- lift(f)
- }
-
- // emit constants after lifting, because lifting may produce new constants.
- f.emitConsts()
-
- f.namedResults = nil // (used by lifting)
- f.implicitResults = nil
-
- numberNodes(f)
-
- defer f.wr.Close()
- f.wr.WriteFunc("start", "start", f)
-
- if f.Prog.mode&PrintFunctions != 0 {
- printMu.Lock()
- f.WriteTo(os.Stdout)
- printMu.Unlock()
- }
-
- if f.Prog.mode&SanityCheckFunctions != 0 {
- mustSanityCheck(f, nil)
- }
-}
-
-func isUselessPhi(phi *Phi) (Value, bool) {
- var v0 Value
- for _, e := range phi.Edges {
- if e == phi {
- continue
- }
- if v0 == nil {
- v0 = e
- }
- if v0 != e {
- if v0, ok := v0.(*Const); ok {
- if e, ok := e.(*Const); ok {
- if v0.typ == e.typ && v0.Value == e.Value {
- continue
- }
- }
- }
- return nil, false
- }
- }
- return v0, true
-}
-
-func (f *Function) RemoveNilBlocks() {
- f.removeNilBlocks()
-}
-
-// removeNilBlocks eliminates nils from f.Blocks and updates each
-// BasicBlock.Index. Use this after any pass that may delete blocks.
-//
-func (f *Function) removeNilBlocks() {
- j := 0
- for _, b := range f.Blocks {
- if b != nil {
- b.Index = j
- f.Blocks[j] = b
- j++
- }
- }
- // Nil out f.Blocks[j:] to aid GC.
- for i := j; i < len(f.Blocks); i++ {
- f.Blocks[i] = nil
- }
- f.Blocks = f.Blocks[:j]
-}
-
-// SetDebugMode sets the debug mode for package pkg. If true, all its
-// functions will include full debug info. This greatly increases the
-// size of the instruction stream, and causes Functions to depend upon
-// the ASTs, potentially keeping them live in memory for longer.
-//
-func (pkg *Package) SetDebugMode(debug bool) {
- // TODO(adonovan): do we want ast.File granularity?
- pkg.debug = debug
-}
-
-// debugInfo reports whether debug info is wanted for this function.
-func (f *Function) debugInfo() bool {
- return f.Pkg != nil && f.Pkg.debug
-}
-
-// addNamedLocal creates a local variable, adds it to function f and
-// returns it. Its name and type are taken from obj. Subsequent
-// calls to f.lookup(obj) will return the same local.
-//
-func (f *Function) addNamedLocal(obj types.Object, source ast.Node) *Alloc {
- l := f.addLocal(obj.Type(), source)
- f.objects[obj] = l
- return l
-}
-
-func (f *Function) addLocalForIdent(id *ast.Ident) *Alloc {
- return f.addNamedLocal(f.Pkg.info.Defs[id], id)
-}
-
-// addLocal creates an anonymous local variable of type typ, adds it
-// to function f and returns it. pos is the optional source location.
-//
-func (f *Function) addLocal(typ types.Type, source ast.Node) *Alloc {
- v := &Alloc{}
- v.setType(types.NewPointer(typ))
- f.Locals = append(f.Locals, v)
- f.emit(v, source)
- return v
-}
-
-// lookup returns the address of the named variable identified by obj
-// that is local to function f or one of its enclosing functions.
-// If escaping, the reference comes from a potentially escaping pointer
-// expression and the referent must be heap-allocated.
-//
-func (f *Function) lookup(obj types.Object, escaping bool) Value {
- if v, ok := f.objects[obj]; ok {
- if alloc, ok := v.(*Alloc); ok && escaping {
- alloc.Heap = true
- }
- return v // function-local var (address)
- }
-
- // Definition must be in an enclosing function;
- // plumb it through intervening closures.
- if f.parent == nil {
- panic("no ir.Value for " + obj.String())
- }
- outer := f.parent.lookup(obj, true) // escaping
- v := &FreeVar{
- name: obj.Name(),
- typ: outer.Type(),
- outer: outer,
- parent: f,
- }
- f.objects[obj] = v
- f.FreeVars = append(f.FreeVars, v)
- return v
-}
-
-// emit emits the specified instruction to function f.
-func (f *Function) emit(instr Instruction, source ast.Node) Value {
- return f.currentBlock.emit(instr, source)
-}
-
-// RelString returns the full name of this function, qualified by
-// package name, receiver type, etc.
-//
-// The specific formatting rules are not guaranteed and may change.
-//
-// Examples:
-// "math.IsNaN" // a package-level function
-// "(*bytes.Buffer).Bytes" // a declared method or a wrapper
-// "(*bytes.Buffer).Bytes$thunk" // thunk (func wrapping method; receiver is param 0)
-// "(*bytes.Buffer).Bytes$bound" // bound (func wrapping method; receiver supplied by closure)
-// "main.main$1" // an anonymous function in main
-// "main.init#1" // a declared init function
-// "main.init" // the synthesized package initializer
-//
-// When these functions are referred to from within the same package
-// (i.e. from == f.Pkg.Object), they are rendered without the package path.
-// For example: "IsNaN", "(*Buffer).Bytes", etc.
-//
-// All non-synthetic functions have distinct package-qualified names.
-// (But two methods may have the same name "(T).f" if one is a synthetic
-// wrapper promoting a non-exported method "f" from another package; in
-// that case, the strings are equal but the identifiers "f" are distinct.)
-//
-func (f *Function) RelString(from *types.Package) string {
- // Anonymous?
- if f.parent != nil {
- // An anonymous function's Name() looks like "parentName$1",
- // but its String() should include the type/package/etc.
- parent := f.parent.RelString(from)
- for i, anon := range f.parent.AnonFuncs {
- if anon == f {
- return fmt.Sprintf("%s$%d", parent, 1+i)
- }
- }
-
- return f.name // should never happen
- }
-
- // Method (declared or wrapper)?
- if recv := f.Signature.Recv(); recv != nil {
- return f.relMethod(from, recv.Type())
- }
-
- // Thunk?
- if f.method != nil {
- return f.relMethod(from, f.method.Recv())
- }
-
- // Bound?
- if len(f.FreeVars) == 1 && strings.HasSuffix(f.name, "$bound") {
- return f.relMethod(from, f.FreeVars[0].Type())
- }
-
- // Package-level function?
- // Prefix with package name for cross-package references only.
- if p := f.pkg(); p != nil && p != from {
- return fmt.Sprintf("%s.%s", p.Path(), f.name)
- }
-
- // Unknown.
- return f.name
-}
-
-func (f *Function) relMethod(from *types.Package, recv types.Type) string {
- return fmt.Sprintf("(%s).%s", relType(recv, from), f.name)
-}
-
-// writeSignature writes to buf the signature sig in declaration syntax.
-func writeSignature(buf *bytes.Buffer, from *types.Package, name string, sig *types.Signature, params []*Parameter) {
- buf.WriteString("func ")
- if recv := sig.Recv(); recv != nil {
- buf.WriteString("(")
- if n := params[0].Name(); n != "" {
- buf.WriteString(n)
- buf.WriteString(" ")
- }
- types.WriteType(buf, params[0].Type(), types.RelativeTo(from))
- buf.WriteString(") ")
- }
- buf.WriteString(name)
- types.WriteSignature(buf, sig, types.RelativeTo(from))
-}
-
-func (f *Function) pkg() *types.Package {
- if f.Pkg != nil {
- return f.Pkg.Pkg
- }
- return nil
-}
-
-var _ io.WriterTo = (*Function)(nil) // *Function implements io.Writer
-
-func (f *Function) WriteTo(w io.Writer) (int64, error) {
- var buf bytes.Buffer
- WriteFunction(&buf, f)
- n, err := w.Write(buf.Bytes())
- return int64(n), err
-}
-
-// WriteFunction writes to buf a human-readable "disassembly" of f.
-func WriteFunction(buf *bytes.Buffer, f *Function) {
- fmt.Fprintf(buf, "# Name: %s\n", f.String())
- if f.Pkg != nil {
- fmt.Fprintf(buf, "# Package: %s\n", f.Pkg.Pkg.Path())
- }
- if syn := f.Synthetic; syn != "" {
- fmt.Fprintln(buf, "# Synthetic:", syn)
- }
- if pos := f.Pos(); pos.IsValid() {
- fmt.Fprintf(buf, "# Location: %s\n", f.Prog.Fset.Position(pos))
- }
-
- if f.parent != nil {
- fmt.Fprintf(buf, "# Parent: %s\n", f.parent.Name())
- }
-
- from := f.pkg()
-
- if f.FreeVars != nil {
- buf.WriteString("# Free variables:\n")
- for i, fv := range f.FreeVars {
- fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, fv.Name(), relType(fv.Type(), from))
- }
- }
-
- if len(f.Locals) > 0 {
- buf.WriteString("# Locals:\n")
- for i, l := range f.Locals {
- fmt.Fprintf(buf, "# % 3d:\t%s %s\n", i, l.Name(), relType(deref(l.Type()), from))
- }
- }
- writeSignature(buf, from, f.Name(), f.Signature, f.Params)
- buf.WriteString(":\n")
-
- if f.Blocks == nil {
- buf.WriteString("\t(external)\n")
- }
-
- for _, b := range f.Blocks {
- if b == nil {
- // Corrupt CFG.
- fmt.Fprintf(buf, ".nil:\n")
- continue
- }
- fmt.Fprintf(buf, "b%d:", b.Index)
- if len(b.Preds) > 0 {
- fmt.Fprint(buf, " ←")
- for _, pred := range b.Preds {
- fmt.Fprintf(buf, " b%d", pred.Index)
- }
- }
- if b.Comment != "" {
- fmt.Fprintf(buf, " # %s", b.Comment)
- }
- buf.WriteByte('\n')
-
- if false { // CFG debugging
- fmt.Fprintf(buf, "\t# CFG: %s --> %s --> %s\n", b.Preds, b, b.Succs)
- }
-
- buf2 := &bytes.Buffer{}
- for _, instr := range b.Instrs {
- buf.WriteString("\t")
- switch v := instr.(type) {
- case Value:
- // Left-align the instruction.
- if name := v.Name(); name != "" {
- fmt.Fprintf(buf, "%s = ", name)
- }
- buf.WriteString(instr.String())
- case nil:
- // Be robust against bad transforms.
- buf.WriteString("")
- default:
- buf.WriteString(instr.String())
- }
- buf.WriteString("\n")
-
- if f.Prog.mode&PrintSource != 0 {
- if s := instr.Source(); s != nil {
- buf2.Reset()
- format.Node(buf2, f.Prog.Fset, s)
- for {
- line, err := buf2.ReadString('\n')
- if len(line) == 0 {
- break
- }
- buf.WriteString("\t\t> ")
- buf.WriteString(line)
- if line[len(line)-1] != '\n' {
- buf.WriteString("\n")
- }
- if err != nil {
- break
- }
- }
- }
- }
- }
- buf.WriteString("\n")
- }
-}
-
-// newBasicBlock adds to f a new basic block and returns it. It does
-// not automatically become the current block for subsequent calls to emit.
-// comment is an optional string for more readable debugging output.
-//
-func (f *Function) newBasicBlock(comment string) *BasicBlock {
- b := &BasicBlock{
- Index: len(f.Blocks),
- Comment: comment,
- parent: f,
- }
- b.Succs = b.succs2[:0]
- f.Blocks = append(f.Blocks, b)
- return b
-}
-
-// NewFunction returns a new synthetic Function instance belonging to
-// prog, with its name and signature fields set as specified.
-//
-// The caller is responsible for initializing the remaining fields of
-// the function object, e.g. Pkg, Params, Blocks.
-//
-// It is practically impossible for clients to construct well-formed
-// IR functions/packages/programs directly, so we assume this is the
-// job of the Builder alone. NewFunction exists to provide clients a
-// little flexibility. For example, analysis tools may wish to
-// construct fake Functions for the root of the callgraph, a fake
-// "reflect" package, etc.
-//
-// TODO(adonovan): think harder about the API here.
-//
-func (prog *Program) NewFunction(name string, sig *types.Signature, provenance string) *Function {
- return &Function{Prog: prog, name: name, Signature: sig, Synthetic: provenance}
-}
-
-//lint:ignore U1000 we may make use of this for functions loaded from export data
-type extentNode [2]token.Pos
-
-func (n extentNode) Pos() token.Pos { return n[0] }
-func (n extentNode) End() token.Pos { return n[1] }
-
-func (f *Function) initHTML(name string) {
- if name == "" {
- return
- }
- if rel := f.RelString(nil); rel == name {
- f.wr = NewHTMLWriter("ir.html", rel, "")
- }
-}
diff --git a/vendor/honnef.co/go/tools/ir/html.go b/vendor/honnef.co/go/tools/ir/html.go
deleted file mode 100644
index c18375333a9..00000000000
--- a/vendor/honnef.co/go/tools/ir/html.go
+++ /dev/null
@@ -1,1124 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Copyright 2019 Dominik Honnef. All rights reserved.
-
-package ir
-
-import (
- "bytes"
- "fmt"
- "go/types"
- "html"
- "io"
- "log"
- "os"
- "os/exec"
- "path/filepath"
- "reflect"
- "sort"
- "strings"
-)
-
-func live(f *Function) []bool {
- max := 0
- var ops []*Value
-
- for _, b := range f.Blocks {
- for _, instr := range b.Instrs {
- if int(instr.ID()) > max {
- max = int(instr.ID())
- }
- }
- }
-
- out := make([]bool, max+1)
- var q []Node
- for _, b := range f.Blocks {
- for _, instr := range b.Instrs {
- switch instr.(type) {
- case *BlankStore, *Call, *ConstantSwitch, *Defer, *Go, *If, *Jump, *MapUpdate, *Next, *Panic, *Recv, *Return, *RunDefers, *Send, *Store, *Unreachable:
- out[instr.ID()] = true
- q = append(q, instr)
- }
- }
- }
-
- for len(q) > 0 {
- v := q[len(q)-1]
- q = q[:len(q)-1]
- for _, op := range v.Operands(ops) {
- if *op == nil {
- continue
- }
- if !out[(*op).ID()] {
- out[(*op).ID()] = true
- q = append(q, *op)
- }
- }
- }
-
- return out
-}
-
-type funcPrinter interface {
- startBlock(b *BasicBlock, reachable bool)
- endBlock(b *BasicBlock)
- value(v Node, live bool)
- startDepCycle()
- endDepCycle()
- named(n string, vals []Value)
-}
-
-func namedValues(f *Function) map[types.Object][]Value {
- names := map[types.Object][]Value{}
- for _, b := range f.Blocks {
- for _, instr := range b.Instrs {
- if instr, ok := instr.(*DebugRef); ok {
- if obj := instr.object; obj != nil {
- names[obj] = append(names[obj], instr.X)
- }
- }
- }
- }
- // XXX deduplicate values
- return names
-}
-
-func fprintFunc(p funcPrinter, f *Function) {
- // XXX does our IR form preserve unreachable blocks?
- // reachable, live := findlive(f)
-
- l := live(f)
- for _, b := range f.Blocks {
- // XXX
- // p.startBlock(b, reachable[b.Index])
- p.startBlock(b, true)
-
- end := len(b.Instrs) - 1
- if end < 0 {
- end = 0
- }
- for _, v := range b.Instrs[:end] {
- if _, ok := v.(*DebugRef); !ok {
- p.value(v, l[v.ID()])
- }
- }
- p.endBlock(b)
- }
-
- names := namedValues(f)
- keys := make([]types.Object, 0, len(names))
- for key := range names {
- keys = append(keys, key)
- }
- sort.Slice(keys, func(i, j int) bool {
- return keys[i].Pos() < keys[j].Pos()
- })
- for _, key := range keys {
- p.named(key.Name(), names[key])
- }
-}
-
-func opName(v Node) string {
- switch v := v.(type) {
- case *Call:
- if v.Common().IsInvoke() {
- return "Invoke"
- }
- return "Call"
- case *Alloc:
- if v.Heap {
- return "HeapAlloc"
- }
- return "StackAlloc"
- case *Select:
- if v.Blocking {
- return "SelectBlocking"
- }
- return "SelectNonBlocking"
- default:
- return reflect.ValueOf(v).Type().Elem().Name()
- }
-}
-
-type HTMLWriter struct {
- w io.WriteCloser
- path string
- dot *dotWriter
-}
-
-func NewHTMLWriter(path string, funcname, cfgMask string) *HTMLWriter {
- out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
- if err != nil {
- log.Fatalf("%v", err)
- }
- pwd, err := os.Getwd()
- if err != nil {
- log.Fatalf("%v", err)
- }
- html := HTMLWriter{w: out, path: filepath.Join(pwd, path)}
- html.dot = newDotWriter()
- html.start(funcname)
- return &html
-}
-
-func (w *HTMLWriter) start(name string) {
- if w == nil {
- return
- }
- w.WriteString("")
- w.WriteString(`
-
-
-
-
-
-`)
- w.WriteString("")
- w.WriteString("")
- w.WriteString(html.EscapeString(name))
- w.WriteString(" ")
- w.WriteString(`
-help
-
-
-
-Click on a value or block to toggle highlighting of that value/block
-and its uses. (Values and blocks are highlighted by ID, and IDs of
-dead items may be reused, so not all highlights necessarily correspond
-to the clicked item.)
-
-
-
-Faded out values and blocks are dead code that has not been eliminated.
-
-
-
-Values printed in italics have a dependency cycle.
-
-
-
-CFG : Dashed edge is for unlikely branches. Blue color is for backward edges.
-Edge with a dot means that this edge follows the order in which blocks were laidout.
-
-
-
-`)
- w.WriteString("")
- w.WriteString("")
-}
-
-func (w *HTMLWriter) Close() {
- if w == nil {
- return
- }
- io.WriteString(w.w, " ")
- io.WriteString(w.w, "
")
- io.WriteString(w.w, "")
- io.WriteString(w.w, "")
- w.w.Close()
- fmt.Printf("dumped IR to %v\n", w.path)
-}
-
-// WriteFunc writes f in a column headed by title.
-// phase is used for collapsing columns and should be unique across the table.
-func (w *HTMLWriter) WriteFunc(phase, title string, f *Function) {
- if w == nil {
- return
- }
- w.WriteColumn(phase, title, "", funcHTML(f, phase, w.dot))
-}
-
-// WriteColumn writes raw HTML in a column headed by title.
-// It is intended for pre- and post-compilation log output.
-func (w *HTMLWriter) WriteColumn(phase, title, class, html string) {
- if w == nil {
- return
- }
- id := strings.Replace(phase, " ", "-", -1)
- // collapsed column
- w.Printf("%v
", id, phase)
-
- if class == "" {
- w.Printf("", id)
- } else {
- w.Printf(" ", id, class)
- }
- w.WriteString("" + title + " ")
- w.WriteString(html)
- w.WriteString(" ")
-}
-
-func (w *HTMLWriter) Printf(msg string, v ...interface{}) {
- if _, err := fmt.Fprintf(w.w, msg, v...); err != nil {
- log.Fatalf("%v", err)
- }
-}
-
-func (w *HTMLWriter) WriteString(s string) {
- if _, err := io.WriteString(w.w, s); err != nil {
- log.Fatalf("%v", err)
- }
-}
-
-func valueHTML(v Node) string {
- if v == nil {
- return "<nil>"
- }
- // TODO: Using the value ID as the class ignores the fact
- // that value IDs get recycled and that some values
- // are transmuted into other values.
- class := fmt.Sprintf("t%d", v.ID())
- var label string
- switch v := v.(type) {
- case *Function:
- label = v.RelString(nil)
- case *Builtin:
- label = v.Name()
- default:
- label = class
- }
- return fmt.Sprintf("%s ", class, label)
-}
-
-func valueLongHTML(v Node) string {
- // TODO: Any intra-value formatting?
- // I'm wary of adding too much visual noise,
- // but a little bit might be valuable.
- // We already have visual noise in the form of punctuation
- // maybe we could replace some of that with formatting.
- s := fmt.Sprintf("", v.ID())
-
- linenumber := "(?) "
- if v.Pos().IsValid() {
- line := v.Parent().Prog.Fset.Position(v.Pos()).Line
- linenumber = fmt.Sprintf("(%d) ", line, line)
- }
-
- s += fmt.Sprintf("%s %s = %s", valueHTML(v), linenumber, opName(v))
-
- if v, ok := v.(Value); ok {
- s += " <" + html.EscapeString(v.Type().String()) + ">"
- }
-
- switch v := v.(type) {
- case *Parameter:
- s += fmt.Sprintf(" {%s}", html.EscapeString(v.name))
- case *BinOp:
- s += fmt.Sprintf(" {%s}", html.EscapeString(v.Op.String()))
- case *UnOp:
- s += fmt.Sprintf(" {%s}", html.EscapeString(v.Op.String()))
- case *Extract:
- name := v.Tuple.Type().(*types.Tuple).At(v.Index).Name()
- s += fmt.Sprintf(" [%d] (%s)", v.Index, name)
- case *Field:
- st := v.X.Type().Underlying().(*types.Struct)
- // Be robust against a bad index.
- name := "?"
- if 0 <= v.Field && v.Field < st.NumFields() {
- name = st.Field(v.Field).Name()
- }
- s += fmt.Sprintf(" [%d] (%s)", v.Field, name)
- case *FieldAddr:
- st := deref(v.X.Type()).Underlying().(*types.Struct)
- // Be robust against a bad index.
- name := "?"
- if 0 <= v.Field && v.Field < st.NumFields() {
- name = st.Field(v.Field).Name()
- }
-
- s += fmt.Sprintf(" [%d] (%s)", v.Field, name)
- case *Recv:
- s += fmt.Sprintf(" {%t}", v.CommaOk)
- case *Call:
- if v.Common().IsInvoke() {
- s += fmt.Sprintf(" {%s}", html.EscapeString(v.Common().Method.FullName()))
- }
- case *Const:
- if v.Value == nil {
- s += " {<nil>}"
- } else {
- s += fmt.Sprintf(" {%s}", html.EscapeString(v.Value.String()))
- }
- case *Sigma:
- s += fmt.Sprintf(" [#%s]", v.From)
- }
- for _, a := range v.Operands(nil) {
- s += fmt.Sprintf(" %s", valueHTML(*a))
- }
-
- // OPT(dh): we're calling namedValues many times on the same function.
- allNames := namedValues(v.Parent())
- var names []string
- for name, values := range allNames {
- for _, value := range values {
- if v == value {
- names = append(names, name.Name())
- break
- }
- }
- }
- if len(names) != 0 {
- s += " (" + strings.Join(names, ", ") + ")"
- }
-
- s += " "
- return s
-}
-
-func blockHTML(b *BasicBlock) string {
- // TODO: Using the value ID as the class ignores the fact
- // that value IDs get recycled and that some values
- // are transmuted into other values.
- s := html.EscapeString(b.String())
- return fmt.Sprintf("%s ", s, s)
-}
-
-func blockLongHTML(b *BasicBlock) string {
- var kind string
- var term Instruction
- if len(b.Instrs) > 0 {
- term = b.Control()
- kind = opName(term)
- }
- // TODO: improve this for HTML?
- s := fmt.Sprintf("%s ", b.Index, kind)
-
- if term != nil {
- ops := term.Operands(nil)
- if len(ops) > 0 {
- var ss []string
- for _, op := range ops {
- ss = append(ss, valueHTML(*op))
- }
- s += " " + strings.Join(ss, ", ")
- }
- }
- if len(b.Succs) > 0 {
- s += " →" // right arrow
- for _, c := range b.Succs {
- s += " " + blockHTML(c)
- }
- }
- return s
-}
-
-func funcHTML(f *Function, phase string, dot *dotWriter) string {
- buf := new(bytes.Buffer)
- if dot != nil {
- dot.writeFuncSVG(buf, phase, f)
- }
- fmt.Fprint(buf, "")
- p := htmlFuncPrinter{w: buf}
- fprintFunc(p, f)
-
- // fprintFunc(&buf, f) // TODO: HTML, not text, for line breaks, etc.
- fmt.Fprint(buf, "")
- return buf.String()
-}
-
-type htmlFuncPrinter struct {
- w io.Writer
-}
-
-func (p htmlFuncPrinter) startBlock(b *BasicBlock, reachable bool) {
- var dead string
- if !reachable {
- dead = "dead-block"
- }
- fmt.Fprintf(p.w, "", b, dead)
- fmt.Fprintf(p.w, "%s:", blockHTML(b))
- if len(b.Preds) > 0 {
- io.WriteString(p.w, " ←") // left arrow
- for _, pred := range b.Preds {
- fmt.Fprintf(p.w, " %s", blockHTML(pred))
- }
- }
- if len(b.Instrs) > 0 {
- io.WriteString(p.w, `- `)
- }
- io.WriteString(p.w, " ")
- if len(b.Instrs) > 0 { // start list of values
- io.WriteString(p.w, "")
- io.WriteString(p.w, "")
- }
-}
-
-func (p htmlFuncPrinter) endBlock(b *BasicBlock) {
- if len(b.Instrs) > 0 { // end list of values
- io.WriteString(p.w, " ")
- io.WriteString(p.w, " ")
- }
- io.WriteString(p.w, "")
- fmt.Fprint(p.w, blockLongHTML(b))
- io.WriteString(p.w, " ")
- io.WriteString(p.w, " ")
-}
-
-func (p htmlFuncPrinter) value(v Node, live bool) {
- var dead string
- if !live {
- dead = "dead-value"
- }
- fmt.Fprintf(p.w, "", dead)
- fmt.Fprint(p.w, valueLongHTML(v))
- io.WriteString(p.w, " ")
-}
-
-func (p htmlFuncPrinter) startDepCycle() {
- fmt.Fprintln(p.w, "")
-}
-
-func (p htmlFuncPrinter) endDepCycle() {
- fmt.Fprintln(p.w, " ")
-}
-
-func (p htmlFuncPrinter) named(n string, vals []Value) {
- fmt.Fprintf(p.w, "name %s: ", n)
- for _, val := range vals {
- fmt.Fprintf(p.w, "%s ", valueHTML(val))
- }
- fmt.Fprintf(p.w, " ")
-}
-
-type dotWriter struct {
- path string
- broken bool
-}
-
-// newDotWriter returns non-nil value when mask is valid.
-// dotWriter will generate SVGs only for the phases specified in the mask.
-// mask can contain following patterns and combinations of them:
-// * - all of them;
-// x-y - x through y, inclusive;
-// x,y - x and y, but not the passes between.
-func newDotWriter() *dotWriter {
- path, err := exec.LookPath("dot")
- if err != nil {
- fmt.Println(err)
- return nil
- }
- return &dotWriter{path: path}
-}
-
-func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Function) {
- if d.broken {
- return
- }
- cmd := exec.Command(d.path, "-Tsvg")
- pipe, err := cmd.StdinPipe()
- if err != nil {
- d.broken = true
- fmt.Println(err)
- return
- }
- buf := new(bytes.Buffer)
- cmd.Stdout = buf
- bufErr := new(bytes.Buffer)
- cmd.Stderr = bufErr
- err = cmd.Start()
- if err != nil {
- d.broken = true
- fmt.Println(err)
- return
- }
- fmt.Fprint(pipe, `digraph "" { margin=0; size="4,40"; ranksep=.2; `)
- id := strings.Replace(phase, " ", "-", -1)
- fmt.Fprintf(pipe, `id="g_graph_%s";`, id)
- fmt.Fprintf(pipe, `node [style=filled,fillcolor=white,fontsize=16,fontname="Menlo,Times,serif",margin="0.01,0.03"];`)
- fmt.Fprintf(pipe, `edge [fontsize=16,fontname="Menlo,Times,serif"];`)
- for _, b := range f.Blocks {
- layout := ""
- fmt.Fprintf(pipe, `%v [label="%v%s\n%v",id="graph_node_%v_%v"];`, b, b, layout, b.Control().String(), id, b)
- }
- indexOf := make([]int, len(f.Blocks))
- for i, b := range f.Blocks {
- indexOf[b.Index] = i
- }
-
- // XXX
- /*
- ponums := make([]int32, len(f.Blocks))
- _ = postorderWithNumbering(f, ponums)
- isBackEdge := func(from, to int) bool {
- return ponums[from] <= ponums[to]
- }
- */
- isBackEdge := func(from, to int) bool { return false }
-
- for _, b := range f.Blocks {
- for i, s := range b.Succs {
- style := "solid"
- color := "black"
- arrow := "vee"
- if isBackEdge(b.Index, s.Index) {
- color = "blue"
- }
- fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s, i, style, color, arrow)
- }
- }
- fmt.Fprint(pipe, "}")
- pipe.Close()
- err = cmd.Wait()
- if err != nil {
- d.broken = true
- fmt.Printf("dot: %v\n%v\n", err, bufErr.String())
- return
- }
-
- svgID := "svg_graph_" + id
- fmt.Fprintf(w, `- +
`, svgID, svgID)
- // For now, an awful hack: edit the html as it passes through
- // our fingers, finding ' 0 {
- fset = initial[0].Fset
- }
-
- prog := ir.NewProgram(fset, mode)
- if opts != nil {
- prog.PrintFunc = opts.PrintFunc
- }
-
- isInitial := make(map[*packages.Package]bool, len(initial))
- for _, p := range initial {
- isInitial[p] = true
- }
-
- irmap := make(map[*packages.Package]*ir.Package)
- packages.Visit(initial, nil, func(p *packages.Package) {
- if p.Types != nil && !p.IllTyped {
- var files []*ast.File
- if deps || isInitial[p] {
- files = p.Syntax
- }
- irmap[p] = prog.CreatePackage(p.Types, files, p.TypesInfo, true)
- }
- })
-
- var irpkgs []*ir.Package
- for _, p := range initial {
- irpkgs = append(irpkgs, irmap[p]) // may be nil
- }
- return prog, irpkgs
-}
-
-// CreateProgram returns a new program in IR form, given a program
-// loaded from source. An IR package is created for each transitively
-// error-free package of lprog.
-//
-// Code for bodies of functions is not built until Build is called
-// on the result.
-//
-// The mode parameter controls diagnostics and checking during IR construction.
-//
-// Deprecated: use golang.org/x/tools/go/packages and the Packages
-// function instead; see ir.ExampleLoadPackages.
-//
-func CreateProgram(lprog *loader.Program, mode ir.BuilderMode) *ir.Program {
- prog := ir.NewProgram(lprog.Fset, mode)
-
- for _, info := range lprog.AllPackages {
- if info.TransitivelyErrorFree {
- prog.CreatePackage(info.Pkg, info.Files, &info.Info, info.Importable)
- }
- }
-
- return prog
-}
-
-// BuildPackage builds an IR program with IR for a single package.
-//
-// It populates pkg by type-checking the specified file ASTs. All
-// dependencies are loaded using the importer specified by tc, which
-// typically loads compiler export data; IR code cannot be built for
-// those packages. BuildPackage then constructs an ir.Program with all
-// dependency packages created, and builds and returns the IR package
-// corresponding to pkg.
-//
-// The caller must have set pkg.Path() to the import path.
-//
-// The operation fails if there were any type-checking or import errors.
-//
-// See ../ir/example_test.go for an example.
-//
-func BuildPackage(tc *types.Config, fset *token.FileSet, pkg *types.Package, files []*ast.File, mode ir.BuilderMode) (*ir.Package, *types.Info, error) {
- if fset == nil {
- panic("no token.FileSet")
- }
- if pkg.Path() == "" {
- panic("package has no import path")
- }
-
- info := &types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Defs: make(map[*ast.Ident]types.Object),
- Uses: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Scopes: make(map[ast.Node]*types.Scope),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- }
- if err := types.NewChecker(tc, fset, pkg, info).Files(files); err != nil {
- return nil, nil, err
- }
-
- prog := ir.NewProgram(fset, mode)
-
- // Create IR packages for all imports.
- // Order is not significant.
- created := make(map[*types.Package]bool)
- var createAll func(pkgs []*types.Package)
- createAll = func(pkgs []*types.Package) {
- for _, p := range pkgs {
- if !created[p] {
- created[p] = true
- prog.CreatePackage(p, nil, nil, true)
- createAll(p.Imports())
- }
- }
- }
- createAll(pkg.Imports())
-
- // Create and build the primary package.
- irpkg := prog.CreatePackage(pkg, files, info, false)
- irpkg.Build()
- return irpkg, info, nil
-}
diff --git a/vendor/honnef.co/go/tools/ir/irutil/switch.go b/vendor/honnef.co/go/tools/ir/irutil/switch.go
deleted file mode 100644
index f44cbca9e9e..00000000000
--- a/vendor/honnef.co/go/tools/ir/irutil/switch.go
+++ /dev/null
@@ -1,264 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package irutil
-
-// This file implements discovery of switch and type-switch constructs
-// from low-level control flow.
-//
-// Many techniques exist for compiling a high-level switch with
-// constant cases to efficient machine code. The optimal choice will
-// depend on the data type, the specific case values, the code in the
-// body of each case, and the hardware.
-// Some examples:
-// - a lookup table (for a switch that maps constants to constants)
-// - a computed goto
-// - a binary tree
-// - a perfect hash
-// - a two-level switch (to partition constant strings by their first byte).
-
-import (
- "bytes"
- "fmt"
- "go/token"
- "go/types"
-
- "honnef.co/go/tools/ir"
-)
-
-// A ConstCase represents a single constant comparison.
-// It is part of a Switch.
-type ConstCase struct {
- Block *ir.BasicBlock // block performing the comparison
- Body *ir.BasicBlock // body of the case
- Value *ir.Const // case comparand
-}
-
-// A TypeCase represents a single type assertion.
-// It is part of a Switch.
-type TypeCase struct {
- Block *ir.BasicBlock // block performing the type assert
- Body *ir.BasicBlock // body of the case
- Type types.Type // case type
- Binding ir.Value // value bound by this case
-}
-
-// A Switch is a logical high-level control flow operation
-// (a multiway branch) discovered by analysis of a CFG containing
-// only if/else chains. It is not part of the ir.Instruction set.
-//
-// One of ConstCases and TypeCases has length >= 2;
-// the other is nil.
-//
-// In a value switch, the list of cases may contain duplicate constants.
-// A type switch may contain duplicate types, or types assignable
-// to an interface type also in the list.
-// TODO(adonovan): eliminate such duplicates.
-//
-type Switch struct {
- Start *ir.BasicBlock // block containing start of if/else chain
- X ir.Value // the switch operand
- ConstCases []ConstCase // ordered list of constant comparisons
- TypeCases []TypeCase // ordered list of type assertions
- Default *ir.BasicBlock // successor if all comparisons fail
-}
-
-func (sw *Switch) String() string {
- // We represent each block by the String() of its
- // first Instruction, e.g. "print(42:int)".
- var buf bytes.Buffer
- if sw.ConstCases != nil {
- fmt.Fprintf(&buf, "switch %s {\n", sw.X.Name())
- for _, c := range sw.ConstCases {
- fmt.Fprintf(&buf, "case %s: %s\n", c.Value.Name(), c.Body.Instrs[0])
- }
- } else {
- fmt.Fprintf(&buf, "switch %s.(type) {\n", sw.X.Name())
- for _, c := range sw.TypeCases {
- fmt.Fprintf(&buf, "case %s %s: %s\n",
- c.Binding.Name(), c.Type, c.Body.Instrs[0])
- }
- }
- if sw.Default != nil {
- fmt.Fprintf(&buf, "default: %s\n", sw.Default.Instrs[0])
- }
- fmt.Fprintf(&buf, "}")
- return buf.String()
-}
-
-// Switches examines the control-flow graph of fn and returns the
-// set of inferred value and type switches. A value switch tests an
-// ir.Value for equality against two or more compile-time constant
-// values. Switches involving link-time constants (addresses) are
-// ignored. A type switch type-asserts an ir.Value against two or
-// more types.
-//
-// The switches are returned in dominance order.
-//
-// The resulting switches do not necessarily correspond to uses of the
-// 'switch' keyword in the source: for example, a single source-level
-// switch statement with non-constant cases may result in zero, one or
-// many Switches, one per plural sequence of constant cases.
-// Switches may even be inferred from if/else- or goto-based control flow.
-// (In general, the control flow constructs of the source program
-// cannot be faithfully reproduced from the IR.)
-//
-func Switches(fn *ir.Function) []Switch {
- // Traverse the CFG in dominance order, so we don't
- // enter an if/else-chain in the middle.
- var switches []Switch
- seen := make(map[*ir.BasicBlock]bool) // TODO(adonovan): opt: use ir.blockSet
- for _, b := range fn.DomPreorder() {
- if x, k := isComparisonBlock(b); x != nil {
- // Block b starts a switch.
- sw := Switch{Start: b, X: x}
- valueSwitch(&sw, k, seen)
- if len(sw.ConstCases) > 1 {
- switches = append(switches, sw)
- }
- }
-
- if y, x, T := isTypeAssertBlock(b); y != nil {
- // Block b starts a type switch.
- sw := Switch{Start: b, X: x}
- typeSwitch(&sw, y, T, seen)
- if len(sw.TypeCases) > 1 {
- switches = append(switches, sw)
- }
- }
- }
- return switches
-}
-
-func isSameX(x1 ir.Value, x2 ir.Value) bool {
- if x1 == x2 {
- return true
- }
- if x2, ok := x2.(*ir.Sigma); ok {
- return isSameX(x1, x2.X)
- }
- return false
-}
-
-func valueSwitch(sw *Switch, k *ir.Const, seen map[*ir.BasicBlock]bool) {
- b := sw.Start
- x := sw.X
- for isSameX(sw.X, x) {
- if seen[b] {
- break
- }
- seen[b] = true
-
- sw.ConstCases = append(sw.ConstCases, ConstCase{
- Block: b,
- Body: b.Succs[0],
- Value: k,
- })
- b = b.Succs[1]
- n := 0
- for _, instr := range b.Instrs {
- switch instr.(type) {
- case *ir.If, *ir.BinOp:
- n++
- case *ir.Sigma, *ir.Phi, *ir.DebugRef:
- default:
- n += 1000
- }
- }
- if n != 2 {
- // Block b contains not just 'if x == k' and σ/ϕ nodes,
- // so it may have side effects that
- // make it unsafe to elide.
- break
- }
- if len(b.Preds) != 1 {
- // Block b has multiple predecessors,
- // so it cannot be treated as a case.
- break
- }
- x, k = isComparisonBlock(b)
- }
- sw.Default = b
-}
-
-func typeSwitch(sw *Switch, y ir.Value, T types.Type, seen map[*ir.BasicBlock]bool) {
- b := sw.Start
- x := sw.X
- for isSameX(sw.X, x) {
- if seen[b] {
- break
- }
- seen[b] = true
-
- sw.TypeCases = append(sw.TypeCases, TypeCase{
- Block: b,
- Body: b.Succs[0],
- Type: T,
- Binding: y,
- })
- b = b.Succs[1]
- n := 0
- for _, instr := range b.Instrs {
- switch instr.(type) {
- case *ir.TypeAssert, *ir.Extract, *ir.If:
- n++
- case *ir.Sigma, *ir.Phi:
- default:
- n += 1000
- }
- }
- if n != 4 {
- // Block b contains not just
- // {TypeAssert; Extract #0; Extract #1; If}
- // so it may have side effects that
- // make it unsafe to elide.
- break
- }
- if len(b.Preds) != 1 {
- // Block b has multiple predecessors,
- // so it cannot be treated as a case.
- break
- }
- y, x, T = isTypeAssertBlock(b)
- }
- sw.Default = b
-}
-
-// isComparisonBlock returns the operands (v, k) if a block ends with
-// a comparison v==k, where k is a compile-time constant.
-//
-func isComparisonBlock(b *ir.BasicBlock) (v ir.Value, k *ir.Const) {
- if n := len(b.Instrs); n >= 2 {
- if i, ok := b.Instrs[n-1].(*ir.If); ok {
- if binop, ok := i.Cond.(*ir.BinOp); ok && binop.Block() == b && binop.Op == token.EQL {
- if k, ok := binop.Y.(*ir.Const); ok {
- return binop.X, k
- }
- if k, ok := binop.X.(*ir.Const); ok {
- return binop.Y, k
- }
- }
- }
- }
- return
-}
-
-// isTypeAssertBlock returns the operands (y, x, T) if a block ends with
-// a type assertion "if y, ok := x.(T); ok {".
-//
-func isTypeAssertBlock(b *ir.BasicBlock) (y, x ir.Value, T types.Type) {
- if n := len(b.Instrs); n >= 4 {
- if i, ok := b.Instrs[n-1].(*ir.If); ok {
- if ext1, ok := i.Cond.(*ir.Extract); ok && ext1.Block() == b && ext1.Index == 1 {
- if ta, ok := ext1.Tuple.(*ir.TypeAssert); ok && ta.Block() == b {
- // hack: relies upon instruction ordering.
- if ext0, ok := b.Instrs[n-3].(*ir.Extract); ok {
- return ext0, ta.X, ta.AssertedType
- }
- }
- }
- }
- }
- return
-}
diff --git a/vendor/honnef.co/go/tools/ir/irutil/util.go b/vendor/honnef.co/go/tools/ir/irutil/util.go
deleted file mode 100644
index 04b25f5f9be..00000000000
--- a/vendor/honnef.co/go/tools/ir/irutil/util.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package irutil
-
-import (
- "honnef.co/go/tools/ir"
-)
-
-func Reachable(from, to *ir.BasicBlock) bool {
- if from == to {
- return true
- }
- if from.Dominates(to) {
- return true
- }
-
- found := false
- Walk(from, func(b *ir.BasicBlock) bool {
- if b == to {
- found = true
- return false
- }
- return true
- })
- return found
-}
-
-func Walk(b *ir.BasicBlock, fn func(*ir.BasicBlock) bool) {
- seen := map[*ir.BasicBlock]bool{}
- wl := []*ir.BasicBlock{b}
- for len(wl) > 0 {
- b := wl[len(wl)-1]
- wl = wl[:len(wl)-1]
- if seen[b] {
- continue
- }
- seen[b] = true
- if !fn(b) {
- continue
- }
- wl = append(wl, b.Succs...)
- }
-}
-
-func Vararg(x *ir.Slice) ([]ir.Value, bool) {
- var out []ir.Value
- slice, ok := x.X.(*ir.Alloc)
- if !ok {
- return nil, false
- }
- for _, ref := range *slice.Referrers() {
- if ref == x {
- continue
- }
- if ref.Block() != x.Block() {
- return nil, false
- }
- idx, ok := ref.(*ir.IndexAddr)
- if !ok {
- return nil, false
- }
- if len(*idx.Referrers()) != 1 {
- return nil, false
- }
- store, ok := (*idx.Referrers())[0].(*ir.Store)
- if !ok {
- return nil, false
- }
- out = append(out, store.Val)
- }
- return out, true
-}
diff --git a/vendor/honnef.co/go/tools/ir/irutil/visit.go b/vendor/honnef.co/go/tools/ir/irutil/visit.go
deleted file mode 100644
index 657c9cde747..00000000000
--- a/vendor/honnef.co/go/tools/ir/irutil/visit.go
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package irutil // import "honnef.co/go/tools/ir/irutil"
-
-import "honnef.co/go/tools/ir"
-
-// This file defines utilities for visiting the IR of
-// a Program.
-//
-// TODO(adonovan): test coverage.
-
-// AllFunctions finds and returns the set of functions potentially
-// needed by program prog, as determined by a simple linker-style
-// reachability algorithm starting from the members and method-sets of
-// each package. The result may include anonymous functions and
-// synthetic wrappers.
-//
-// Precondition: all packages are built.
-//
-func AllFunctions(prog *ir.Program) map[*ir.Function]bool {
- visit := visitor{
- prog: prog,
- seen: make(map[*ir.Function]bool),
- }
- visit.program()
- return visit.seen
-}
-
-type visitor struct {
- prog *ir.Program
- seen map[*ir.Function]bool
-}
-
-func (visit *visitor) program() {
- for _, pkg := range visit.prog.AllPackages() {
- for _, mem := range pkg.Members {
- if fn, ok := mem.(*ir.Function); ok {
- visit.function(fn)
- }
- }
- }
- for _, T := range visit.prog.RuntimeTypes() {
- mset := visit.prog.MethodSets.MethodSet(T)
- for i, n := 0, mset.Len(); i < n; i++ {
- visit.function(visit.prog.MethodValue(mset.At(i)))
- }
- }
-}
-
-func (visit *visitor) function(fn *ir.Function) {
- if !visit.seen[fn] {
- visit.seen[fn] = true
- var buf [10]*ir.Value // avoid alloc in common case
- for _, b := range fn.Blocks {
- for _, instr := range b.Instrs {
- for _, op := range instr.Operands(buf[:0]) {
- if fn, ok := (*op).(*ir.Function); ok {
- visit.function(fn)
- }
- }
- }
- }
- }
-}
-
-// MainPackages returns the subset of the specified packages
-// named "main" that define a main function.
-// The result may include synthetic "testmain" packages.
-func MainPackages(pkgs []*ir.Package) []*ir.Package {
- var mains []*ir.Package
- for _, pkg := range pkgs {
- if pkg.Pkg.Name() == "main" && pkg.Func("main") != nil {
- mains = append(mains, pkg)
- }
- }
- return mains
-}
diff --git a/vendor/honnef.co/go/tools/ir/lift.go b/vendor/honnef.co/go/tools/ir/lift.go
deleted file mode 100644
index 71d5c8cb060..00000000000
--- a/vendor/honnef.co/go/tools/ir/lift.go
+++ /dev/null
@@ -1,1063 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file defines the lifting pass which tries to "lift" Alloc
-// cells (new/local variables) into SSA registers, replacing loads
-// with the dominating stored value, eliminating loads and stores, and
-// inserting φ- and σ-nodes as needed.
-
-// Cited papers and resources:
-//
-// Ron Cytron et al. 1991. Efficiently computing SSA form...
-// http://doi.acm.org/10.1145/115372.115320
-//
-// Cooper, Harvey, Kennedy. 2001. A Simple, Fast Dominance Algorithm.
-// Software Practice and Experience 2001, 4:1-10.
-// http://www.hipersoft.rice.edu/grads/publications/dom14.pdf
-//
-// Daniel Berlin, llvmdev mailing list, 2012.
-// http://lists.cs.uiuc.edu/pipermail/llvmdev/2012-January/046638.html
-// (Be sure to expand the whole thread.)
-//
-// C. Scott Ananian. 1997. The static single information form.
-//
-// Jeremy Singer. 2006. Static program analysis based on virtual register renaming.
-
-// TODO(adonovan): opt: there are many optimizations worth evaluating, and
-// the conventional wisdom for SSA construction is that a simple
-// algorithm well engineered often beats those of better asymptotic
-// complexity on all but the most egregious inputs.
-//
-// Danny Berlin suggests that the Cooper et al. algorithm for
-// computing the dominance frontier is superior to Cytron et al.
-// Furthermore he recommends that rather than computing the DF for the
-// whole function then renaming all alloc cells, it may be cheaper to
-// compute the DF for each alloc cell separately and throw it away.
-//
-// Consider exploiting liveness information to avoid creating dead
-// φ-nodes which we then immediately remove.
-//
-// Also see many other "TODO: opt" suggestions in the code.
-
-import (
- "fmt"
- "go/types"
- "os"
-)
-
-// If true, show diagnostic information at each step of lifting.
-// Very verbose.
-const debugLifting = false
-
-// domFrontier maps each block to the set of blocks in its dominance
-// frontier. The outer slice is conceptually a map keyed by
-// Block.Index. The inner slice is conceptually a set, possibly
-// containing duplicates.
-//
-// TODO(adonovan): opt: measure impact of dups; consider a packed bit
-// representation, e.g. big.Int, and bitwise parallel operations for
-// the union step in the Children loop.
-//
-// domFrontier's methods mutate the slice's elements but not its
-// length, so their receivers needn't be pointers.
-//
-type domFrontier [][]*BasicBlock
-
-func (df domFrontier) add(u, v *BasicBlock) {
- df[u.Index] = append(df[u.Index], v)
-}
-
-// build builds the dominance frontier df for the dominator tree of
-// fn, using the algorithm found in A Simple, Fast Dominance
-// Algorithm, Figure 5.
-//
-// TODO(adonovan): opt: consider Berlin approach, computing pruned SSA
-// by pruning the entire IDF computation, rather than merely pruning
-// the DF -> IDF step.
-func (df domFrontier) build(fn *Function) {
- for _, b := range fn.Blocks {
- if len(b.Preds) >= 2 {
- for _, p := range b.Preds {
- runner := p
- for runner != b.dom.idom {
- df.add(runner, b)
- runner = runner.dom.idom
- }
- }
- }
- }
-}
-
-func buildDomFrontier(fn *Function) domFrontier {
- df := make(domFrontier, len(fn.Blocks))
- df.build(fn)
- return df
-}
-
-type postDomFrontier [][]*BasicBlock
-
-func (rdf postDomFrontier) add(u, v *BasicBlock) {
- rdf[u.Index] = append(rdf[u.Index], v)
-}
-
-func (rdf postDomFrontier) build(fn *Function) {
- for _, b := range fn.Blocks {
- if len(b.Succs) >= 2 {
- for _, s := range b.Succs {
- runner := s
- for runner != b.pdom.idom {
- rdf.add(runner, b)
- runner = runner.pdom.idom
- }
- }
- }
- }
-}
-
-func buildPostDomFrontier(fn *Function) postDomFrontier {
- rdf := make(postDomFrontier, len(fn.Blocks))
- rdf.build(fn)
- return rdf
-}
-
-func removeInstr(refs []Instruction, instr Instruction) []Instruction {
- i := 0
- for _, ref := range refs {
- if ref == instr {
- continue
- }
- refs[i] = ref
- i++
- }
- for j := i; j != len(refs); j++ {
- refs[j] = nil // aid GC
- }
- return refs[:i]
-}
-
-func clearInstrs(instrs []Instruction) {
- for i := range instrs {
- instrs[i] = nil
- }
-}
-
-// lift replaces local and new Allocs accessed only with
-// load/store by IR registers, inserting φ- and σ-nodes where necessary.
-// The result is a program in pruned SSI form.
-//
-// Preconditions:
-// - fn has no dead blocks (blockopt has run).
-// - Def/use info (Operands and Referrers) is up-to-date.
-// - The dominator tree is up-to-date.
-//
-func lift(fn *Function) {
- // TODO(adonovan): opt: lots of little optimizations may be
- // worthwhile here, especially if they cause us to avoid
- // buildDomFrontier. For example:
- //
- // - Alloc never loaded? Eliminate.
- // - Alloc never stored? Replace all loads with a zero constant.
- // - Alloc stored once? Replace loads with dominating store;
- // don't forget that an Alloc is itself an effective store
- // of zero.
- // - Alloc used only within a single block?
- // Use degenerate algorithm avoiding φ-nodes.
- // - Consider synergy with scalar replacement of aggregates (SRA).
- // e.g. *(&x.f) where x is an Alloc.
- // Perhaps we'd get better results if we generated this as x.f
- // i.e. Field(x, .f) instead of Load(FieldIndex(x, .f)).
- // Unclear.
- //
- // But we will start with the simplest correct code.
- var df domFrontier
- var rdf postDomFrontier
- var closure *closure
- var newPhis newPhiMap
- var newSigmas newSigmaMap
-
- // During this pass we will replace some BasicBlock.Instrs
- // (allocs, loads and stores) with nil, keeping a count in
- // BasicBlock.gaps. At the end we will reset Instrs to the
- // concatenation of all non-dead newPhis and non-nil Instrs
- // for the block, reusing the original array if space permits.
-
- // While we're here, we also eliminate 'rundefers'
- // instructions in functions that contain no 'defer'
- // instructions.
- usesDefer := false
-
- // Determine which allocs we can lift and number them densely.
- // The renaming phase uses this numbering for compact maps.
- numAllocs := 0
- for _, b := range fn.Blocks {
- b.gaps = 0
- b.rundefers = 0
- for _, instr := range b.Instrs {
- switch instr := instr.(type) {
- case *Alloc:
- if !liftable(instr) {
- instr.index = -1
- continue
- }
- index := -1
- if numAllocs == 0 {
- df = buildDomFrontier(fn)
- rdf = buildPostDomFrontier(fn)
- if len(fn.Blocks) > 2 {
- closure = transitiveClosure(fn)
- }
- newPhis = make(newPhiMap, len(fn.Blocks))
- newSigmas = make(newSigmaMap, len(fn.Blocks))
-
- if debugLifting {
- title := false
- for i, blocks := range df {
- if blocks != nil {
- if !title {
- fmt.Fprintf(os.Stderr, "Dominance frontier of %s:\n", fn)
- title = true
- }
- fmt.Fprintf(os.Stderr, "\t%s: %s\n", fn.Blocks[i], blocks)
- }
- }
- }
- }
- liftAlloc(closure, df, rdf, instr, newPhis, newSigmas)
- index = numAllocs
- numAllocs++
- instr.index = index
- case *Defer:
- usesDefer = true
- case *RunDefers:
- b.rundefers++
- }
- }
- }
-
- if numAllocs > 0 {
- // renaming maps an alloc (keyed by index) to its replacement
- // value. Initially the renaming contains nil, signifying the
- // zero constant of the appropriate type; we construct the
- // Const lazily at most once on each path through the domtree.
- // TODO(adonovan): opt: cache per-function not per subtree.
- renaming := make([]Value, numAllocs)
-
- // Renaming.
- rename(fn.Blocks[0], renaming, newPhis, newSigmas)
-
- simplifyPhis(newPhis)
-
- // Eliminate dead φ- and σ-nodes.
- markLiveNodes(fn.Blocks, newPhis, newSigmas)
- }
-
- // Prepend remaining live φ-nodes to each block and possibly kill rundefers.
- for _, b := range fn.Blocks {
- var head []Instruction
- if numAllocs > 0 {
- nps := newPhis[b.Index]
- head = make([]Instruction, 0, len(nps))
- for _, pred := range b.Preds {
- nss := newSigmas[pred.Index]
- idx := pred.succIndex(b)
- for _, newSigma := range nss {
- if sigma := newSigma.sigmas[idx]; sigma != nil && sigma.live {
- head = append(head, sigma)
-
- // we didn't populate referrers before, as most
- // sigma nodes will be killed
- if refs := sigma.X.Referrers(); refs != nil {
- *refs = append(*refs, sigma)
- }
- } else if sigma != nil {
- sigma.block = nil
- }
- }
- }
- for _, np := range nps {
- if np.phi.live {
- head = append(head, np.phi)
- } else {
- for _, edge := range np.phi.Edges {
- if refs := edge.Referrers(); refs != nil {
- *refs = removeInstr(*refs, np.phi)
- }
- }
- np.phi.block = nil
- }
- }
- }
-
- rundefersToKill := b.rundefers
- if usesDefer {
- rundefersToKill = 0
- }
-
- j := len(head)
- if j+b.gaps+rundefersToKill == 0 {
- continue // fast path: no new phis or gaps
- }
-
- // We could do straight copies instead of element-wise copies
- // when both b.gaps and rundefersToKill are zero. However,
- // that seems to only be the case ~1% of the time, which
- // doesn't seem worth the extra branch.
-
- // Remove dead instructions, add phis and sigmas
- ns := len(b.Instrs) + j - b.gaps - rundefersToKill
- if ns <= cap(b.Instrs) {
- // b.Instrs has enough capacity to store all instructions
-
- // OPT(dh): check cap vs the actually required space; if
- // there is a big enough difference, it may be worth
- // allocating a new slice, to avoid pinning memory.
- dst := b.Instrs[:cap(b.Instrs)]
- i := len(dst) - 1
- for n := len(b.Instrs) - 1; n >= 0; n-- {
- instr := dst[n]
- if instr == nil {
- continue
- }
- if !usesDefer {
- if _, ok := instr.(*RunDefers); ok {
- continue
- }
- }
- dst[i] = instr
- i--
- }
- off := i + 1 - len(head)
- // aid GC
- clearInstrs(dst[:off])
- dst = dst[off:]
- copy(dst, head)
- b.Instrs = dst
- } else {
- // not enough space, so allocate a new slice and copy
- // over.
- dst := make([]Instruction, ns)
- copy(dst, head)
-
- for _, instr := range b.Instrs {
- if instr == nil {
- continue
- }
- if !usesDefer {
- if _, ok := instr.(*RunDefers); ok {
- continue
- }
- }
- dst[j] = instr
- j++
- }
- b.Instrs = dst
- }
- }
-
- // Remove any fn.Locals that were lifted.
- j := 0
- for _, l := range fn.Locals {
- if l.index < 0 {
- fn.Locals[j] = l
- j++
- }
- }
- // Nil out fn.Locals[j:] to aid GC.
- for i := j; i < len(fn.Locals); i++ {
- fn.Locals[i] = nil
- }
- fn.Locals = fn.Locals[:j]
-}
-
-func hasDirectReferrer(instr Instruction) bool {
- for _, instr := range *instr.Referrers() {
- switch instr.(type) {
- case *Phi, *Sigma:
- // ignore
- default:
- return true
- }
- }
- return false
-}
-
-func markLiveNodes(blocks []*BasicBlock, newPhis newPhiMap, newSigmas newSigmaMap) {
- // Phi and sigma nodes are considered live if a non-phi, non-sigma
- // node uses them. Once we find a node that is live, we mark all
- // of its operands as used, too.
- for _, npList := range newPhis {
- for _, np := range npList {
- phi := np.phi
- if !phi.live && hasDirectReferrer(phi) {
- markLivePhi(phi)
- }
- }
- }
- for _, npList := range newSigmas {
- for _, np := range npList {
- for _, sigma := range np.sigmas {
- if sigma != nil && !sigma.live && hasDirectReferrer(sigma) {
- markLiveSigma(sigma)
- }
- }
- }
- }
- // Existing φ-nodes due to && and || operators
- // are all considered live (see Go issue 19622).
- for _, b := range blocks {
- for _, phi := range b.phis() {
- markLivePhi(phi.(*Phi))
- }
- }
-}
-
-func markLivePhi(phi *Phi) {
- phi.live = true
- for _, rand := range phi.Edges {
- switch rand := rand.(type) {
- case *Phi:
- if !rand.live {
- markLivePhi(rand)
- }
- case *Sigma:
- if !rand.live {
- markLiveSigma(rand)
- }
- }
- }
-}
-
-func markLiveSigma(sigma *Sigma) {
- sigma.live = true
- switch rand := sigma.X.(type) {
- case *Phi:
- if !rand.live {
- markLivePhi(rand)
- }
- case *Sigma:
- if !rand.live {
- markLiveSigma(rand)
- }
- }
-}
-
-// simplifyPhis replaces trivial phis with non-phi alternatives. Phi
-// nodes where all edges are identical, or consist of only the phi
-// itself and one other value, may be replaced with the value.
-func simplifyPhis(newPhis newPhiMap) {
- // find all phis that are trivial and can be replaced with a
- // non-phi value. run until we reach a fixpoint, because replacing
- // a phi may make other phis trivial.
- for changed := true; changed; {
- changed = false
- for _, npList := range newPhis {
- for _, np := range npList {
- if np.phi.live {
- // we're reusing 'live' to mean 'dead' in the context of simplifyPhis
- continue
- }
- if r, ok := isUselessPhi(np.phi); ok {
- // useless phi, replace its uses with the
- // replacement value. the dead phi pass will clean
- // up the phi afterwards.
- replaceAll(np.phi, r)
- np.phi.live = true
- changed = true
- }
- }
- }
- }
-
- for _, npList := range newPhis {
- for _, np := range npList {
- np.phi.live = false
- }
- }
-}
-
-type BlockSet struct {
- idx int
- values []bool
- count int
-}
-
-func NewBlockSet(size int) *BlockSet {
- return &BlockSet{values: make([]bool, size)}
-}
-
-func (s *BlockSet) Set(s2 *BlockSet) {
- copy(s.values, s2.values)
- s.count = 0
- for _, v := range s.values {
- if v {
- s.count++
- }
- }
-}
-
-func (s *BlockSet) Num() int {
- return s.count
-}
-
-func (s *BlockSet) Has(b *BasicBlock) bool {
- if b.Index >= len(s.values) {
- return false
- }
- return s.values[b.Index]
-}
-
-// add adds b to the set and returns true if the set changed.
-func (s *BlockSet) Add(b *BasicBlock) bool {
- if s.values[b.Index] {
- return false
- }
- s.count++
- s.values[b.Index] = true
- s.idx = b.Index
-
- return true
-}
-
-func (s *BlockSet) Clear() {
- for j := range s.values {
- s.values[j] = false
- }
- s.count = 0
-}
-
-// take removes an arbitrary element from a set s and
-// returns its index, or returns -1 if empty.
-func (s *BlockSet) Take() int {
- // [i, end]
- for i := s.idx; i < len(s.values); i++ {
- if s.values[i] {
- s.values[i] = false
- s.idx = i
- s.count--
- return i
- }
- }
-
- // [start, i)
- for i := 0; i < s.idx; i++ {
- if s.values[i] {
- s.values[i] = false
- s.idx = i
- s.count--
- return i
- }
- }
-
- return -1
-}
-
-type closure struct {
- span []uint32
- reachables []interval
-}
-
-type interval uint32
-
-const (
- flagMask = 1 << 31
- numBits = 20
- lengthBits = 32 - numBits - 1
- lengthMask = (1<>numBits
- } else {
- // large interval
- i++
- start = uint32(inv & numMask)
- end = uint32(r[i])
- }
- if idx >= start && idx <= end {
- return true
- }
- }
- return false
-}
-
-func (c closure) reachable(id int) []interval {
- return c.reachables[c.span[id]:c.span[id+1]]
-}
-
-func (c closure) walk(current *BasicBlock, b *BasicBlock, visited []bool) {
- visited[b.Index] = true
- for _, succ := range b.Succs {
- if visited[succ.Index] {
- continue
- }
- visited[succ.Index] = true
- c.walk(current, succ, visited)
- }
-}
-
-func transitiveClosure(fn *Function) *closure {
- reachable := make([]bool, len(fn.Blocks))
- c := &closure{}
- c.span = make([]uint32, len(fn.Blocks)+1)
-
- addInterval := func(start, end uint32) {
- if l := end - start; l <= 1<= 0 { // store of zero to Alloc cell
- // Replace dominated loads by the zero value.
- renaming[instr.index] = nil
- if debugLifting {
- fmt.Fprintf(os.Stderr, "\tkill alloc %s\n", instr)
- }
- // Delete the Alloc.
- u.Instrs[i] = nil
- u.gaps++
- }
-
- case *Store:
- if alloc, ok := instr.Addr.(*Alloc); ok && alloc.index >= 0 { // store to Alloc cell
- // Replace dominated loads by the stored value.
- renaming[alloc.index] = instr.Val
- if debugLifting {
- fmt.Fprintf(os.Stderr, "\tkill store %s; new value: %s\n",
- instr, instr.Val.Name())
- }
- if refs := instr.Addr.Referrers(); refs != nil {
- *refs = removeInstr(*refs, instr)
- }
- if refs := instr.Val.Referrers(); refs != nil {
- *refs = removeInstr(*refs, instr)
- }
- // Delete the Store.
- u.Instrs[i] = nil
- u.gaps++
- }
-
- case *Load:
- if alloc, ok := instr.X.(*Alloc); ok && alloc.index >= 0 { // load of Alloc cell
- // In theory, we wouldn't be able to replace loads
- // directly, because a loaded value could be used in
- // different branches, in which case it should be
- // replaced with different sigma nodes. But we can't
- // simply defer replacement, either, because then
- // later stores might incorrectly affect this load.
- //
- // To avoid doing renaming on _all_ values (instead of
- // just loads and stores like we're doing), we make
- // sure during code generation that each load is only
- // used in one block. For example, in constant switch
- // statements, where the tag is only evaluated once,
- // we store it in a temporary and load it for each
- // comparison, so that we have individual loads to
- // replace.
- newval := renamed(u.Parent(), renaming, alloc)
- if debugLifting {
- fmt.Fprintf(os.Stderr, "\tupdate load %s = %s with %s\n",
- instr.Name(), instr, newval)
- }
- replaceAll(instr, newval)
- u.Instrs[i] = nil
- u.gaps++
- }
-
- case *DebugRef:
- if x, ok := instr.X.(*Alloc); ok && x.index >= 0 {
- if instr.IsAddr {
- instr.X = renamed(u.Parent(), renaming, x)
- instr.IsAddr = false
-
- // Add DebugRef to instr.X's referrers.
- if refs := instr.X.Referrers(); refs != nil {
- *refs = append(*refs, instr)
- }
- } else {
- // A source expression denotes the address
- // of an Alloc that was optimized away.
- instr.X = nil
-
- // Delete the DebugRef.
- u.Instrs[i] = nil
- u.gaps++
- }
- }
- }
- }
-
- // update all outgoing sigma nodes with the dominating store
- for _, sigmas := range newSigmas[u.Index] {
- for _, sigma := range sigmas.sigmas {
- if sigma == nil {
- continue
- }
- sigma.X = renamed(u.Parent(), renaming, sigmas.alloc)
- }
- }
-
- // For each φ-node in a CFG successor, rename the edge.
- for succi, v := range u.Succs {
- phis := newPhis[v.Index]
- if len(phis) == 0 {
- continue
- }
- i := v.predIndex(u)
- for _, np := range phis {
- phi := np.phi
- alloc := np.alloc
- // if there's a sigma node, use it, else use the dominating value
- var newval Value
- for _, sigmas := range newSigmas[u.Index] {
- if sigmas.alloc == alloc && sigmas.sigmas[succi] != nil {
- newval = sigmas.sigmas[succi]
- break
- }
- }
- if newval == nil {
- newval = renamed(u.Parent(), renaming, alloc)
- }
- if debugLifting {
- fmt.Fprintf(os.Stderr, "\tsetphi %s edge %s -> %s (#%d) (alloc=%s) := %s\n",
- phi.Name(), u, v, i, alloc.Name(), newval.Name())
- }
- phi.Edges[i] = newval
- if prefs := newval.Referrers(); prefs != nil {
- *prefs = append(*prefs, phi)
- }
- }
- }
-
- // Continue depth-first recursion over domtree, pushing a
- // fresh copy of the renaming map for each subtree.
- r := make([]Value, len(renaming))
- for _, v := range u.dom.children {
- // XXX add debugging
- copy(r, renaming)
-
- // on entry to a block, the incoming sigma nodes become the new values for their alloc
- if idx := u.succIndex(v); idx != -1 {
- for _, sigma := range newSigmas[u.Index] {
- if sigma.sigmas[idx] != nil {
- r[sigma.alloc.index] = sigma.sigmas[idx]
- }
- }
- }
- rename(v, r, newPhis, newSigmas)
- }
-
-}
diff --git a/vendor/honnef.co/go/tools/ir/lvalue.go b/vendor/honnef.co/go/tools/ir/lvalue.go
deleted file mode 100644
index f676a1f7abe..00000000000
--- a/vendor/honnef.co/go/tools/ir/lvalue.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// lvalues are the union of addressable expressions and map-index
-// expressions.
-
-import (
- "go/ast"
- "go/types"
-)
-
-// An lvalue represents an assignable location that may appear on the
-// left-hand side of an assignment. This is a generalization of a
-// pointer to permit updates to elements of maps.
-//
-type lvalue interface {
- store(fn *Function, v Value, source ast.Node) // stores v into the location
- load(fn *Function, source ast.Node) Value // loads the contents of the location
- address(fn *Function) Value // address of the location
- typ() types.Type // returns the type of the location
-}
-
-// An address is an lvalue represented by a true pointer.
-type address struct {
- addr Value
- expr ast.Expr // source syntax of the value (not address) [debug mode]
-}
-
-func (a *address) load(fn *Function, source ast.Node) Value {
- return emitLoad(fn, a.addr, source)
-}
-
-func (a *address) store(fn *Function, v Value, source ast.Node) {
- store := emitStore(fn, a.addr, v, source)
- if a.expr != nil {
- // store.Val is v, converted for assignability.
- emitDebugRef(fn, a.expr, store.Val, false)
- }
-}
-
-func (a *address) address(fn *Function) Value {
- if a.expr != nil {
- emitDebugRef(fn, a.expr, a.addr, true)
- }
- return a.addr
-}
-
-func (a *address) typ() types.Type {
- return deref(a.addr.Type())
-}
-
-// An element is an lvalue represented by m[k], the location of an
-// element of a map. These locations are not addressable
-// since pointers cannot be formed from them, but they do support
-// load() and store().
-//
-type element struct {
- m, k Value // map
- t types.Type // map element type
-}
-
-func (e *element) load(fn *Function, source ast.Node) Value {
- l := &MapLookup{
- X: e.m,
- Index: e.k,
- }
- l.setType(e.t)
- return fn.emit(l, source)
-}
-
-func (e *element) store(fn *Function, v Value, source ast.Node) {
- up := &MapUpdate{
- Map: e.m,
- Key: e.k,
- Value: emitConv(fn, v, e.t, source),
- }
- fn.emit(up, source)
-}
-
-func (e *element) address(fn *Function) Value {
- panic("map elements are not addressable")
-}
-
-func (e *element) typ() types.Type {
- return e.t
-}
-
-// A blank is a dummy variable whose name is "_".
-// It is not reified: loads are illegal and stores are ignored.
-//
-type blank struct{}
-
-func (bl blank) load(fn *Function, source ast.Node) Value {
- panic("blank.load is illegal")
-}
-
-func (bl blank) store(fn *Function, v Value, source ast.Node) {
- s := &BlankStore{
- Val: v,
- }
- fn.emit(s, source)
-}
-
-func (bl blank) address(fn *Function) Value {
- panic("blank var is not addressable")
-}
-
-func (bl blank) typ() types.Type {
- // This should be the type of the blank Ident; the typechecker
- // doesn't provide this yet, but fortunately, we don't need it
- // yet either.
- panic("blank.typ is unimplemented")
-}
diff --git a/vendor/honnef.co/go/tools/ir/methods.go b/vendor/honnef.co/go/tools/ir/methods.go
deleted file mode 100644
index 517f448b8c3..00000000000
--- a/vendor/honnef.co/go/tools/ir/methods.go
+++ /dev/null
@@ -1,239 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file defines utilities for population of method sets.
-
-import (
- "fmt"
- "go/types"
-)
-
-// MethodValue returns the Function implementing method sel, building
-// wrapper methods on demand. It returns nil if sel denotes an
-// abstract (interface) method.
-//
-// Precondition: sel.Kind() == MethodVal.
-//
-// Thread-safe.
-//
-// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
-//
-func (prog *Program) MethodValue(sel *types.Selection) *Function {
- if sel.Kind() != types.MethodVal {
- panic(fmt.Sprintf("MethodValue(%s) kind != MethodVal", sel))
- }
- T := sel.Recv()
- if isInterface(T) {
- return nil // abstract method
- }
- if prog.mode&LogSource != 0 {
- defer logStack("MethodValue %s %v", T, sel)()
- }
-
- prog.methodsMu.Lock()
- defer prog.methodsMu.Unlock()
-
- return prog.addMethod(prog.createMethodSet(T), sel)
-}
-
-// LookupMethod returns the implementation of the method of type T
-// identified by (pkg, name). It returns nil if the method exists but
-// is abstract, and panics if T has no such method.
-//
-func (prog *Program) LookupMethod(T types.Type, pkg *types.Package, name string) *Function {
- sel := prog.MethodSets.MethodSet(T).Lookup(pkg, name)
- if sel == nil {
- panic(fmt.Sprintf("%s has no method %s", T, types.Id(pkg, name)))
- }
- return prog.MethodValue(sel)
-}
-
-// methodSet contains the (concrete) methods of a non-interface type.
-type methodSet struct {
- mapping map[string]*Function // populated lazily
- complete bool // mapping contains all methods
-}
-
-// Precondition: !isInterface(T).
-// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
-func (prog *Program) createMethodSet(T types.Type) *methodSet {
- mset, ok := prog.methodSets.At(T).(*methodSet)
- if !ok {
- mset = &methodSet{mapping: make(map[string]*Function)}
- prog.methodSets.Set(T, mset)
- }
- return mset
-}
-
-// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
-func (prog *Program) addMethod(mset *methodSet, sel *types.Selection) *Function {
- if sel.Kind() == types.MethodExpr {
- panic(sel)
- }
- id := sel.Obj().Id()
- fn := mset.mapping[id]
- if fn == nil {
- obj := sel.Obj().(*types.Func)
-
- needsPromotion := len(sel.Index()) > 1
- needsIndirection := !isPointer(recvType(obj)) && isPointer(sel.Recv())
- if needsPromotion || needsIndirection {
- fn = makeWrapper(prog, sel)
- } else {
- fn = prog.declaredFunc(obj)
- }
- if fn.Signature.Recv() == nil {
- panic(fn) // missing receiver
- }
- mset.mapping[id] = fn
- }
- return fn
-}
-
-// RuntimeTypes returns a new unordered slice containing all
-// concrete types in the program for which a complete (non-empty)
-// method set is required at run-time.
-//
-// Thread-safe.
-//
-// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
-//
-func (prog *Program) RuntimeTypes() []types.Type {
- prog.methodsMu.Lock()
- defer prog.methodsMu.Unlock()
-
- var res []types.Type
- prog.methodSets.Iterate(func(T types.Type, v interface{}) {
- if v.(*methodSet).complete {
- res = append(res, T)
- }
- })
- return res
-}
-
-// declaredFunc returns the concrete function/method denoted by obj.
-// Panic ensues if there is none.
-//
-func (prog *Program) declaredFunc(obj *types.Func) *Function {
- if v := prog.packageLevelValue(obj); v != nil {
- return v.(*Function)
- }
- panic("no concrete method: " + obj.String())
-}
-
-// needMethodsOf ensures that runtime type information (including the
-// complete method set) is available for the specified type T and all
-// its subcomponents.
-//
-// needMethodsOf must be called for at least every type that is an
-// operand of some MakeInterface instruction, and for the type of
-// every exported package member.
-//
-// Precondition: T is not a method signature (*Signature with Recv()!=nil).
-//
-// Thread-safe. (Called via emitConv from multiple builder goroutines.)
-//
-// TODO(adonovan): make this faster. It accounts for 20% of SSA build time.
-//
-// EXCLUSIVE_LOCKS_ACQUIRED(prog.methodsMu)
-//
-func (prog *Program) needMethodsOf(T types.Type) {
- prog.methodsMu.Lock()
- prog.needMethods(T, false)
- prog.methodsMu.Unlock()
-}
-
-// Precondition: T is not a method signature (*Signature with Recv()!=nil).
-// Recursive case: skip => don't create methods for T.
-//
-// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
-//
-func (prog *Program) needMethods(T types.Type, skip bool) {
- // Each package maintains its own set of types it has visited.
- if prevSkip, ok := prog.runtimeTypes.At(T).(bool); ok {
- // needMethods(T) was previously called
- if !prevSkip || skip {
- return // already seen, with same or false 'skip' value
- }
- }
- prog.runtimeTypes.Set(T, skip)
-
- tmset := prog.MethodSets.MethodSet(T)
-
- if !skip && !isInterface(T) && tmset.Len() > 0 {
- // Create methods of T.
- mset := prog.createMethodSet(T)
- if !mset.complete {
- mset.complete = true
- n := tmset.Len()
- for i := 0; i < n; i++ {
- prog.addMethod(mset, tmset.At(i))
- }
- }
- }
-
- // Recursion over signatures of each method.
- for i := 0; i < tmset.Len(); i++ {
- sig := tmset.At(i).Type().(*types.Signature)
- prog.needMethods(sig.Params(), false)
- prog.needMethods(sig.Results(), false)
- }
-
- switch t := T.(type) {
- case *types.Basic:
- // nop
-
- case *types.Interface:
- // nop---handled by recursion over method set.
-
- case *types.Pointer:
- prog.needMethods(t.Elem(), false)
-
- case *types.Slice:
- prog.needMethods(t.Elem(), false)
-
- case *types.Chan:
- prog.needMethods(t.Elem(), false)
-
- case *types.Map:
- prog.needMethods(t.Key(), false)
- prog.needMethods(t.Elem(), false)
-
- case *types.Signature:
- if t.Recv() != nil {
- panic(fmt.Sprintf("Signature %s has Recv %s", t, t.Recv()))
- }
- prog.needMethods(t.Params(), false)
- prog.needMethods(t.Results(), false)
-
- case *types.Named:
- // A pointer-to-named type can be derived from a named
- // type via reflection. It may have methods too.
- prog.needMethods(types.NewPointer(T), false)
-
- // Consider 'type T struct{S}' where S has methods.
- // Reflection provides no way to get from T to struct{S},
- // only to S, so the method set of struct{S} is unwanted,
- // so set 'skip' flag during recursion.
- prog.needMethods(t.Underlying(), true)
-
- case *types.Array:
- prog.needMethods(t.Elem(), false)
-
- case *types.Struct:
- for i, n := 0, t.NumFields(); i < n; i++ {
- prog.needMethods(t.Field(i).Type(), false)
- }
-
- case *types.Tuple:
- for i, n := 0, t.Len(); i < n; i++ {
- prog.needMethods(t.At(i).Type(), false)
- }
-
- default:
- panic(T)
- }
-}
diff --git a/vendor/honnef.co/go/tools/ir/mode.go b/vendor/honnef.co/go/tools/ir/mode.go
deleted file mode 100644
index da548fdbb29..00000000000
--- a/vendor/honnef.co/go/tools/ir/mode.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file defines the BuilderMode type and its command-line flag.
-
-import (
- "bytes"
- "fmt"
-)
-
-// BuilderMode is a bitmask of options for diagnostics and checking.
-//
-// *BuilderMode satisfies the flag.Value interface. Example:
-//
-// var mode = ir.BuilderMode(0)
-// func init() { flag.Var(&mode, "build", ir.BuilderModeDoc) }
-//
-type BuilderMode uint
-
-const (
- PrintPackages BuilderMode = 1 << iota // Print package inventory to stdout
- PrintFunctions // Print function IR code to stdout
- PrintSource // Print source code when printing function IR
- LogSource // Log source locations as IR builder progresses
- SanityCheckFunctions // Perform sanity checking of function bodies
- NaiveForm // Build naïve IR form: don't replace local loads/stores with registers
- GlobalDebug // Enable debug info for all packages
-)
-
-const BuilderModeDoc = `Options controlling the IR builder.
-The value is a sequence of zero or more of these letters:
-C perform sanity [C]hecking of the IR form.
-D include [D]ebug info for every function.
-P print [P]ackage inventory.
-F print [F]unction IR code.
-A print [A]ST nodes responsible for IR instructions
-S log [S]ource locations as IR builder progresses.
-N build [N]aive IR form: don't replace local loads/stores with registers.
-`
-
-func (m BuilderMode) String() string {
- var buf bytes.Buffer
- if m&GlobalDebug != 0 {
- buf.WriteByte('D')
- }
- if m&PrintPackages != 0 {
- buf.WriteByte('P')
- }
- if m&PrintFunctions != 0 {
- buf.WriteByte('F')
- }
- if m&PrintSource != 0 {
- buf.WriteByte('A')
- }
- if m&LogSource != 0 {
- buf.WriteByte('S')
- }
- if m&SanityCheckFunctions != 0 {
- buf.WriteByte('C')
- }
- if m&NaiveForm != 0 {
- buf.WriteByte('N')
- }
- return buf.String()
-}
-
-// Set parses the flag characters in s and updates *m.
-func (m *BuilderMode) Set(s string) error {
- var mode BuilderMode
- for _, c := range s {
- switch c {
- case 'D':
- mode |= GlobalDebug
- case 'P':
- mode |= PrintPackages
- case 'F':
- mode |= PrintFunctions
- case 'A':
- mode |= PrintSource
- case 'S':
- mode |= LogSource
- case 'C':
- mode |= SanityCheckFunctions
- case 'N':
- mode |= NaiveForm
- default:
- return fmt.Errorf("unknown BuilderMode option: %q", c)
- }
- }
- *m = mode
- return nil
-}
-
-// Get returns m.
-func (m BuilderMode) Get() interface{} { return m }
diff --git a/vendor/honnef.co/go/tools/ir/print.go b/vendor/honnef.co/go/tools/ir/print.go
deleted file mode 100644
index c16c08efa65..00000000000
--- a/vendor/honnef.co/go/tools/ir/print.go
+++ /dev/null
@@ -1,472 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file implements the String() methods for all Value and
-// Instruction types.
-
-import (
- "bytes"
- "fmt"
- "go/types"
- "io"
- "reflect"
- "sort"
-
- "golang.org/x/tools/go/types/typeutil"
-)
-
-// relName returns the name of v relative to i.
-// In most cases, this is identical to v.Name(), but references to
-// Functions (including methods) and Globals use RelString and
-// all types are displayed with relType, so that only cross-package
-// references are package-qualified.
-//
-func relName(v Value, i Instruction) string {
- if v == nil {
- return ""
- }
- var from *types.Package
- if i != nil {
- from = i.Parent().pkg()
- }
- switch v := v.(type) {
- case Member: // *Function or *Global
- return v.RelString(from)
- }
- return v.Name()
-}
-
-func relType(t types.Type, from *types.Package) string {
- return types.TypeString(t, types.RelativeTo(from))
-}
-
-func relString(m Member, from *types.Package) string {
- // NB: not all globals have an Object (e.g. init$guard),
- // so use Package().Object not Object.Package().
- if pkg := m.Package().Pkg; pkg != nil && pkg != from {
- return fmt.Sprintf("%s.%s", pkg.Path(), m.Name())
- }
- return m.Name()
-}
-
-// Value.String()
-//
-// This method is provided only for debugging.
-// It never appears in disassembly, which uses Value.Name().
-
-func (v *Parameter) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("Parameter <%s> {%s}", relType(v.Type(), from), v.name)
-}
-
-func (v *FreeVar) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("FreeVar <%s> %s", relType(v.Type(), from), v.Name())
-}
-
-func (v *Builtin) String() string {
- return fmt.Sprintf("Builtin %s", v.Name())
-}
-
-// Instruction.String()
-
-func (v *Alloc) String() string {
- from := v.Parent().pkg()
- storage := "Stack"
- if v.Heap {
- storage = "Heap"
- }
- return fmt.Sprintf("%sAlloc <%s>", storage, relType(v.Type(), from))
-}
-
-func (v *Sigma) String() string {
- from := v.Parent().pkg()
- s := fmt.Sprintf("Sigma <%s> [b%d] %s", relType(v.Type(), from), v.From.Index, v.X.Name())
- return s
-}
-
-func (v *Phi) String() string {
- var b bytes.Buffer
- fmt.Fprintf(&b, "Phi <%s>", v.Type())
- for i, edge := range v.Edges {
- b.WriteString(" ")
- // Be robust against malformed CFG.
- if v.block == nil {
- b.WriteString("??")
- continue
- }
- block := -1
- if i < len(v.block.Preds) {
- block = v.block.Preds[i].Index
- }
- fmt.Fprintf(&b, "%d:", block)
- edgeVal := "" // be robust
- if edge != nil {
- edgeVal = relName(edge, v)
- }
- b.WriteString(edgeVal)
- }
- return b.String()
-}
-
-func printCall(v *CallCommon, prefix string, instr Instruction) string {
- var b bytes.Buffer
- if !v.IsInvoke() {
- if value, ok := instr.(Value); ok {
- fmt.Fprintf(&b, "%s <%s> %s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr))
- } else {
- fmt.Fprintf(&b, "%s %s", prefix, relName(v.Value, instr))
- }
- } else {
- if value, ok := instr.(Value); ok {
- fmt.Fprintf(&b, "%sInvoke <%s> %s.%s", prefix, relType(value.Type(), instr.Parent().pkg()), relName(v.Value, instr), v.Method.Name())
- } else {
- fmt.Fprintf(&b, "%sInvoke %s.%s", prefix, relName(v.Value, instr), v.Method.Name())
- }
- }
- for _, arg := range v.Args {
- b.WriteString(" ")
- b.WriteString(relName(arg, instr))
- }
- return b.String()
-}
-
-func (c *CallCommon) String() string {
- return printCall(c, "", nil)
-}
-
-func (v *Call) String() string {
- return printCall(&v.Call, "Call", v)
-}
-
-func (v *BinOp) String() string {
- return fmt.Sprintf("BinOp <%s> {%s} %s %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v), relName(v.Y, v))
-}
-
-func (v *UnOp) String() string {
- return fmt.Sprintf("UnOp <%s> {%s} %s", relType(v.Type(), v.Parent().pkg()), v.Op.String(), relName(v.X, v))
-}
-
-func (v *Load) String() string {
- return fmt.Sprintf("Load <%s> %s", relType(v.Type(), v.Parent().pkg()), relName(v.X, v))
-}
-
-func printConv(prefix string, v, x Value) string {
- from := v.Parent().pkg()
- return fmt.Sprintf("%s <%s> %s",
- prefix,
- relType(v.Type(), from),
- relName(x, v.(Instruction)))
-}
-
-func (v *ChangeType) String() string { return printConv("ChangeType", v, v.X) }
-func (v *Convert) String() string { return printConv("Convert", v, v.X) }
-func (v *ChangeInterface) String() string { return printConv("ChangeInterface", v, v.X) }
-func (v *MakeInterface) String() string { return printConv("MakeInterface", v, v.X) }
-
-func (v *MakeClosure) String() string {
- from := v.Parent().pkg()
- var b bytes.Buffer
- fmt.Fprintf(&b, "MakeClosure <%s> %s", relType(v.Type(), from), relName(v.Fn, v))
- if v.Bindings != nil {
- for _, c := range v.Bindings {
- b.WriteString(" ")
- b.WriteString(relName(c, v))
- }
- }
- return b.String()
-}
-
-func (v *MakeSlice) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("MakeSlice <%s> %s %s",
- relType(v.Type(), from),
- relName(v.Len, v),
- relName(v.Cap, v))
-}
-
-func (v *Slice) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("Slice <%s> %s %s %s %s",
- relType(v.Type(), from), relName(v.X, v), relName(v.Low, v), relName(v.High, v), relName(v.Max, v))
-}
-
-func (v *MakeMap) String() string {
- res := ""
- if v.Reserve != nil {
- res = relName(v.Reserve, v)
- }
- from := v.Parent().pkg()
- return fmt.Sprintf("MakeMap <%s> %s", relType(v.Type(), from), res)
-}
-
-func (v *MakeChan) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("MakeChan <%s> %s", relType(v.Type(), from), relName(v.Size, v))
-}
-
-func (v *FieldAddr) String() string {
- from := v.Parent().pkg()
- st := deref(v.X.Type()).Underlying().(*types.Struct)
- // Be robust against a bad index.
- name := "?"
- if 0 <= v.Field && v.Field < st.NumFields() {
- name = st.Field(v.Field).Name()
- }
- return fmt.Sprintf("FieldAddr <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v))
-}
-
-func (v *Field) String() string {
- st := v.X.Type().Underlying().(*types.Struct)
- // Be robust against a bad index.
- name := "?"
- if 0 <= v.Field && v.Field < st.NumFields() {
- name = st.Field(v.Field).Name()
- }
- from := v.Parent().pkg()
- return fmt.Sprintf("Field <%s> [%d] (%s) %s", relType(v.Type(), from), v.Field, name, relName(v.X, v))
-}
-
-func (v *IndexAddr) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("IndexAddr <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v))
-}
-
-func (v *Index) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("Index <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v))
-}
-
-func (v *MapLookup) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("MapLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v))
-}
-
-func (v *StringLookup) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("StringLookup <%s> %s %s", relType(v.Type(), from), relName(v.X, v), relName(v.Index, v))
-}
-
-func (v *Range) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("Range <%s> %s", relType(v.Type(), from), relName(v.X, v))
-}
-
-func (v *Next) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("Next <%s> %s", relType(v.Type(), from), relName(v.Iter, v))
-}
-
-func (v *TypeAssert) String() string {
- from := v.Parent().pkg()
- return fmt.Sprintf("TypeAssert <%s> %s", relType(v.Type(), from), relName(v.X, v))
-}
-
-func (v *Extract) String() string {
- from := v.Parent().pkg()
- name := v.Tuple.Type().(*types.Tuple).At(v.Index).Name()
- return fmt.Sprintf("Extract <%s> [%d] (%s) %s", relType(v.Type(), from), v.Index, name, relName(v.Tuple, v))
-}
-
-func (s *Jump) String() string {
- // Be robust against malformed CFG.
- block := -1
- if s.block != nil && len(s.block.Succs) == 1 {
- block = s.block.Succs[0].Index
- }
- str := fmt.Sprintf("Jump → b%d", block)
- if s.Comment != "" {
- str = fmt.Sprintf("%s # %s", str, s.Comment)
- }
- return str
-}
-
-func (s *Unreachable) String() string {
- // Be robust against malformed CFG.
- block := -1
- if s.block != nil && len(s.block.Succs) == 1 {
- block = s.block.Succs[0].Index
- }
- return fmt.Sprintf("Unreachable → b%d", block)
-}
-
-func (s *If) String() string {
- // Be robust against malformed CFG.
- tblock, fblock := -1, -1
- if s.block != nil && len(s.block.Succs) == 2 {
- tblock = s.block.Succs[0].Index
- fblock = s.block.Succs[1].Index
- }
- return fmt.Sprintf("If %s → b%d b%d", relName(s.Cond, s), tblock, fblock)
-}
-
-func (s *ConstantSwitch) String() string {
- var b bytes.Buffer
- fmt.Fprintf(&b, "ConstantSwitch %s", relName(s.Tag, s))
- for _, cond := range s.Conds {
- fmt.Fprintf(&b, " %s", relName(cond, s))
- }
- fmt.Fprint(&b, " →")
- for _, succ := range s.block.Succs {
- fmt.Fprintf(&b, " b%d", succ.Index)
- }
- return b.String()
-}
-
-func (s *TypeSwitch) String() string {
- from := s.Parent().pkg()
- var b bytes.Buffer
- fmt.Fprintf(&b, "TypeSwitch <%s> %s", relType(s.typ, from), relName(s.Tag, s))
- for _, cond := range s.Conds {
- fmt.Fprintf(&b, " %q", relType(cond, s.block.parent.pkg()))
- }
- return b.String()
-}
-
-func (s *Go) String() string {
- return printCall(&s.Call, "Go", s)
-}
-
-func (s *Panic) String() string {
- // Be robust against malformed CFG.
- block := -1
- if s.block != nil && len(s.block.Succs) == 1 {
- block = s.block.Succs[0].Index
- }
- return fmt.Sprintf("Panic %s → b%d", relName(s.X, s), block)
-}
-
-func (s *Return) String() string {
- var b bytes.Buffer
- b.WriteString("Return")
- for _, r := range s.Results {
- b.WriteString(" ")
- b.WriteString(relName(r, s))
- }
- return b.String()
-}
-
-func (*RunDefers) String() string {
- return "RunDefers"
-}
-
-func (s *Send) String() string {
- return fmt.Sprintf("Send %s %s", relName(s.Chan, s), relName(s.X, s))
-}
-
-func (recv *Recv) String() string {
- from := recv.Parent().pkg()
- return fmt.Sprintf("Recv <%s> %s", relType(recv.Type(), from), relName(recv.Chan, recv))
-}
-
-func (s *Defer) String() string {
- return printCall(&s.Call, "Defer", s)
-}
-
-func (s *Select) String() string {
- var b bytes.Buffer
- for i, st := range s.States {
- if i > 0 {
- b.WriteString(", ")
- }
- if st.Dir == types.RecvOnly {
- b.WriteString("<-")
- b.WriteString(relName(st.Chan, s))
- } else {
- b.WriteString(relName(st.Chan, s))
- b.WriteString("<-")
- b.WriteString(relName(st.Send, s))
- }
- }
- non := ""
- if !s.Blocking {
- non = "Non"
- }
- from := s.Parent().pkg()
- return fmt.Sprintf("Select%sBlocking <%s> [%s]", non, relType(s.Type(), from), b.String())
-}
-
-func (s *Store) String() string {
- return fmt.Sprintf("Store {%s} %s %s",
- s.Val.Type(), relName(s.Addr, s), relName(s.Val, s))
-}
-
-func (s *BlankStore) String() string {
- return fmt.Sprintf("BlankStore %s", relName(s.Val, s))
-}
-
-func (s *MapUpdate) String() string {
- return fmt.Sprintf("MapUpdate %s %s %s", relName(s.Map, s), relName(s.Key, s), relName(s.Value, s))
-}
-
-func (s *DebugRef) String() string {
- p := s.Parent().Prog.Fset.Position(s.Pos())
- var descr interface{}
- if s.object != nil {
- descr = s.object // e.g. "var x int"
- } else {
- descr = reflect.TypeOf(s.Expr) // e.g. "*ast.CallExpr"
- }
- var addr string
- if s.IsAddr {
- addr = "address of "
- }
- return fmt.Sprintf("; %s%s @ %d:%d is %s", addr, descr, p.Line, p.Column, s.X.Name())
-}
-
-func (p *Package) String() string {
- return "package " + p.Pkg.Path()
-}
-
-var _ io.WriterTo = (*Package)(nil) // *Package implements io.Writer
-
-func (p *Package) WriteTo(w io.Writer) (int64, error) {
- var buf bytes.Buffer
- WritePackage(&buf, p)
- n, err := w.Write(buf.Bytes())
- return int64(n), err
-}
-
-// WritePackage writes to buf a human-readable summary of p.
-func WritePackage(buf *bytes.Buffer, p *Package) {
- fmt.Fprintf(buf, "%s:\n", p)
-
- var names []string
- maxname := 0
- for name := range p.Members {
- if l := len(name); l > maxname {
- maxname = l
- }
- names = append(names, name)
- }
-
- from := p.Pkg
- sort.Strings(names)
- for _, name := range names {
- switch mem := p.Members[name].(type) {
- case *NamedConst:
- fmt.Fprintf(buf, " const %-*s %s = %s\n",
- maxname, name, mem.Name(), mem.Value.RelString(from))
-
- case *Function:
- fmt.Fprintf(buf, " func %-*s %s\n",
- maxname, name, relType(mem.Type(), from))
-
- case *Type:
- fmt.Fprintf(buf, " type %-*s %s\n",
- maxname, name, relType(mem.Type().Underlying(), from))
- for _, meth := range typeutil.IntuitiveMethodSet(mem.Type(), &p.Prog.MethodSets) {
- fmt.Fprintf(buf, " %s\n", types.SelectionString(meth, types.RelativeTo(from)))
- }
-
- case *Global:
- fmt.Fprintf(buf, " var %-*s %s\n",
- maxname, name, relType(mem.Type().(*types.Pointer).Elem(), from))
- }
- }
-
- fmt.Fprintf(buf, "\n")
-}
diff --git a/vendor/honnef.co/go/tools/ir/sanity.go b/vendor/honnef.co/go/tools/ir/sanity.go
deleted file mode 100644
index ff9edbc6463..00000000000
--- a/vendor/honnef.co/go/tools/ir/sanity.go
+++ /dev/null
@@ -1,555 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// An optional pass for sanity-checking invariants of the IR representation.
-// Currently it checks CFG invariants but little at the instruction level.
-
-import (
- "fmt"
- "go/types"
- "io"
- "os"
- "strings"
-)
-
-type sanity struct {
- reporter io.Writer
- fn *Function
- block *BasicBlock
- instrs map[Instruction]struct{}
- insane bool
-}
-
-// sanityCheck performs integrity checking of the IR representation
-// of the function fn and returns true if it was valid. Diagnostics
-// are written to reporter if non-nil, os.Stderr otherwise. Some
-// diagnostics are only warnings and do not imply a negative result.
-//
-// Sanity-checking is intended to facilitate the debugging of code
-// transformation passes.
-//
-func sanityCheck(fn *Function, reporter io.Writer) bool {
- if reporter == nil {
- reporter = os.Stderr
- }
- return (&sanity{reporter: reporter}).checkFunction(fn)
-}
-
-// mustSanityCheck is like sanityCheck but panics instead of returning
-// a negative result.
-//
-func mustSanityCheck(fn *Function, reporter io.Writer) {
- if !sanityCheck(fn, reporter) {
- fn.WriteTo(os.Stderr)
- panic("SanityCheck failed")
- }
-}
-
-func (s *sanity) diagnostic(prefix, format string, args ...interface{}) {
- fmt.Fprintf(s.reporter, "%s: function %s", prefix, s.fn)
- if s.block != nil {
- fmt.Fprintf(s.reporter, ", block %s", s.block)
- }
- io.WriteString(s.reporter, ": ")
- fmt.Fprintf(s.reporter, format, args...)
- io.WriteString(s.reporter, "\n")
-}
-
-func (s *sanity) errorf(format string, args ...interface{}) {
- s.insane = true
- s.diagnostic("Error", format, args...)
-}
-
-func (s *sanity) warnf(format string, args ...interface{}) {
- s.diagnostic("Warning", format, args...)
-}
-
-// findDuplicate returns an arbitrary basic block that appeared more
-// than once in blocks, or nil if all were unique.
-func findDuplicate(blocks []*BasicBlock) *BasicBlock {
- if len(blocks) < 2 {
- return nil
- }
- if blocks[0] == blocks[1] {
- return blocks[0]
- }
- // Slow path:
- m := make(map[*BasicBlock]bool)
- for _, b := range blocks {
- if m[b] {
- return b
- }
- m[b] = true
- }
- return nil
-}
-
-func (s *sanity) checkInstr(idx int, instr Instruction) {
- switch instr := instr.(type) {
- case *If, *Jump, *Return, *Panic, *Unreachable, *ConstantSwitch:
- s.errorf("control flow instruction not at end of block")
- case *Sigma:
- if idx > 0 {
- prev := s.block.Instrs[idx-1]
- if _, ok := prev.(*Sigma); !ok {
- s.errorf("Sigma instruction follows a non-Sigma: %T", prev)
- }
- }
- case *Phi:
- if idx == 0 {
- // It suffices to apply this check to just the first phi node.
- if dup := findDuplicate(s.block.Preds); dup != nil {
- s.errorf("phi node in block with duplicate predecessor %s", dup)
- }
- } else {
- prev := s.block.Instrs[idx-1]
- switch prev.(type) {
- case *Phi, *Sigma:
- default:
- s.errorf("Phi instruction follows a non-Phi, non-Sigma: %T", prev)
- }
- }
- if ne, np := len(instr.Edges), len(s.block.Preds); ne != np {
- s.errorf("phi node has %d edges but %d predecessors", ne, np)
-
- } else {
- for i, e := range instr.Edges {
- if e == nil {
- s.errorf("phi node '%v' has no value for edge #%d from %s", instr, i, s.block.Preds[i])
- }
- }
- }
-
- case *Alloc:
- if !instr.Heap {
- found := false
- for _, l := range s.fn.Locals {
- if l == instr {
- found = true
- break
- }
- }
- if !found {
- s.errorf("local alloc %s = %s does not appear in Function.Locals", instr.Name(), instr)
- }
- }
-
- case *BinOp:
- case *Call:
- case *ChangeInterface:
- case *ChangeType:
- case *Convert:
- if _, ok := instr.X.Type().Underlying().(*types.Basic); !ok {
- if _, ok := instr.Type().Underlying().(*types.Basic); !ok {
- s.errorf("convert %s -> %s: at least one type must be basic", instr.X.Type(), instr.Type())
- }
- }
-
- case *Defer:
- case *Extract:
- case *Field:
- case *FieldAddr:
- case *Go:
- case *Index:
- case *IndexAddr:
- case *MapLookup:
- case *StringLookup:
- case *MakeChan:
- case *MakeClosure:
- numFree := len(instr.Fn.(*Function).FreeVars)
- numBind := len(instr.Bindings)
- if numFree != numBind {
- s.errorf("MakeClosure has %d Bindings for function %s with %d free vars",
- numBind, instr.Fn, numFree)
-
- }
- if recv := instr.Type().(*types.Signature).Recv(); recv != nil {
- s.errorf("MakeClosure's type includes receiver %s", recv.Type())
- }
-
- case *MakeInterface:
- case *MakeMap:
- case *MakeSlice:
- case *MapUpdate:
- case *Next:
- case *Range:
- case *RunDefers:
- case *Select:
- case *Send:
- case *Slice:
- case *Store:
- case *TypeAssert:
- case *UnOp:
- case *DebugRef:
- case *BlankStore:
- case *Load:
- case *Parameter:
- case *Const:
- case *Recv:
- case *TypeSwitch:
- default:
- panic(fmt.Sprintf("Unknown instruction type: %T", instr))
- }
-
- if call, ok := instr.(CallInstruction); ok {
- if call.Common().Signature() == nil {
- s.errorf("nil signature: %s", call)
- }
- }
-
- // Check that value-defining instructions have valid types
- // and a valid referrer list.
- if v, ok := instr.(Value); ok {
- t := v.Type()
- if t == nil {
- s.errorf("no type: %s = %s", v.Name(), v)
- } else if t == tRangeIter {
- // not a proper type; ignore.
- } else if b, ok := t.Underlying().(*types.Basic); ok && b.Info()&types.IsUntyped != 0 {
- if _, ok := v.(*Const); !ok {
- s.errorf("instruction has 'untyped' result: %s = %s : %s", v.Name(), v, t)
- }
- }
- s.checkReferrerList(v)
- }
-
- // Untyped constants are legal as instruction Operands(),
- // for example:
- // _ = "foo"[0]
- // or:
- // if wordsize==64 {...}
-
- // All other non-Instruction Values can be found via their
- // enclosing Function or Package.
-}
-
-func (s *sanity) checkFinalInstr(instr Instruction) {
- switch instr := instr.(type) {
- case *If:
- if nsuccs := len(s.block.Succs); nsuccs != 2 {
- s.errorf("If-terminated block has %d successors; expected 2", nsuccs)
- return
- }
- if s.block.Succs[0] == s.block.Succs[1] {
- s.errorf("If-instruction has same True, False target blocks: %s", s.block.Succs[0])
- return
- }
-
- case *Jump:
- if nsuccs := len(s.block.Succs); nsuccs != 1 {
- s.errorf("Jump-terminated block has %d successors; expected 1", nsuccs)
- return
- }
-
- case *Return:
- if nsuccs := len(s.block.Succs); nsuccs != 0 {
- s.errorf("Return-terminated block has %d successors; expected none", nsuccs)
- return
- }
- if na, nf := len(instr.Results), s.fn.Signature.Results().Len(); nf != na {
- s.errorf("%d-ary return in %d-ary function", na, nf)
- }
-
- case *Panic:
- if nsuccs := len(s.block.Succs); nsuccs != 1 {
- s.errorf("Panic-terminated block has %d successors; expected one", nsuccs)
- return
- }
-
- case *Unreachable:
- if nsuccs := len(s.block.Succs); nsuccs != 1 {
- s.errorf("Unreachable-terminated block has %d successors; expected one", nsuccs)
- return
- }
-
- case *ConstantSwitch:
-
- default:
- s.errorf("non-control flow instruction at end of block")
- }
-}
-
-func (s *sanity) checkBlock(b *BasicBlock, index int) {
- s.block = b
-
- if b.Index != index {
- s.errorf("block has incorrect Index %d", b.Index)
- }
- if b.parent != s.fn {
- s.errorf("block has incorrect parent %s", b.parent)
- }
-
- // Check all blocks are reachable.
- // (The entry block is always implicitly reachable, the exit block may be unreachable.)
- if index > 1 && len(b.Preds) == 0 {
- s.warnf("unreachable block")
- if b.Instrs == nil {
- // Since this block is about to be pruned,
- // tolerating transient problems in it
- // simplifies other optimizations.
- return
- }
- }
-
- // Check predecessor and successor relations are dual,
- // and that all blocks in CFG belong to same function.
- for _, a := range b.Preds {
- found := false
- for _, bb := range a.Succs {
- if bb == b {
- found = true
- break
- }
- }
- if !found {
- s.errorf("expected successor edge in predecessor %s; found only: %s", a, a.Succs)
- }
- if a.parent != s.fn {
- s.errorf("predecessor %s belongs to different function %s", a, a.parent)
- }
- }
- for _, c := range b.Succs {
- found := false
- for _, bb := range c.Preds {
- if bb == b {
- found = true
- break
- }
- }
- if !found {
- s.errorf("expected predecessor edge in successor %s; found only: %s", c, c.Preds)
- }
- if c.parent != s.fn {
- s.errorf("successor %s belongs to different function %s", c, c.parent)
- }
- }
-
- // Check each instruction is sane.
- n := len(b.Instrs)
- if n == 0 {
- s.errorf("basic block contains no instructions")
- }
- var rands [10]*Value // reuse storage
- for j, instr := range b.Instrs {
- if instr == nil {
- s.errorf("nil instruction at index %d", j)
- continue
- }
- if b2 := instr.Block(); b2 == nil {
- s.errorf("nil Block() for instruction at index %d", j)
- continue
- } else if b2 != b {
- s.errorf("wrong Block() (%s) for instruction at index %d ", b2, j)
- continue
- }
- if j < n-1 {
- s.checkInstr(j, instr)
- } else {
- s.checkFinalInstr(instr)
- }
-
- // Check Instruction.Operands.
- operands:
- for i, op := range instr.Operands(rands[:0]) {
- if op == nil {
- s.errorf("nil operand pointer %d of %s", i, instr)
- continue
- }
- val := *op
- if val == nil {
- continue // a nil operand is ok
- }
-
- // Check that "untyped" types only appear on constant operands.
- if _, ok := (*op).(*Const); !ok {
- if basic, ok := (*op).Type().(*types.Basic); ok {
- if basic.Info()&types.IsUntyped != 0 {
- s.errorf("operand #%d of %s is untyped: %s", i, instr, basic)
- }
- }
- }
-
- // Check that Operands that are also Instructions belong to same function.
- // TODO(adonovan): also check their block dominates block b.
- if val, ok := val.(Instruction); ok {
- if val.Block() == nil {
- s.errorf("operand %d of %s is an instruction (%s) that belongs to no block", i, instr, val)
- } else if val.Parent() != s.fn {
- s.errorf("operand %d of %s is an instruction (%s) from function %s", i, instr, val, val.Parent())
- }
- }
-
- // Check that each function-local operand of
- // instr refers back to instr. (NB: quadratic)
- switch val := val.(type) {
- case *Const, *Global, *Builtin:
- continue // not local
- case *Function:
- if val.parent == nil {
- continue // only anon functions are local
- }
- }
-
- // TODO(adonovan): check val.Parent() != nil <=> val.Referrers() is defined.
-
- if refs := val.Referrers(); refs != nil {
- for _, ref := range *refs {
- if ref == instr {
- continue operands
- }
- }
- s.errorf("operand %d of %s (%s) does not refer to us", i, instr, val)
- } else {
- s.errorf("operand %d of %s (%s) has no referrers", i, instr, val)
- }
- }
- }
-}
-
-func (s *sanity) checkReferrerList(v Value) {
- refs := v.Referrers()
- if refs == nil {
- s.errorf("%s has missing referrer list", v.Name())
- return
- }
- for i, ref := range *refs {
- if _, ok := s.instrs[ref]; !ok {
- if val, ok := ref.(Value); ok {
- s.errorf("%s.Referrers()[%d] = %s = %s is not an instruction belonging to this function", v.Name(), i, val.Name(), val)
- } else {
- s.errorf("%s.Referrers()[%d] = %s is not an instruction belonging to this function", v.Name(), i, ref)
- }
- }
- }
-}
-
-func (s *sanity) checkFunction(fn *Function) bool {
- // TODO(adonovan): check Function invariants:
- // - check params match signature
- // - check transient fields are nil
- // - warn if any fn.Locals do not appear among block instructions.
- s.fn = fn
- if fn.Prog == nil {
- s.errorf("nil Prog")
- }
-
- _ = fn.String() // must not crash
- _ = fn.RelString(fn.pkg()) // must not crash
-
- // All functions have a package, except delegates (which are
- // shared across packages, or duplicated as weak symbols in a
- // separate-compilation model), and error.Error.
- if fn.Pkg == nil {
- if strings.HasPrefix(fn.Synthetic, "wrapper ") ||
- strings.HasPrefix(fn.Synthetic, "bound ") ||
- strings.HasPrefix(fn.Synthetic, "thunk ") ||
- strings.HasSuffix(fn.name, "Error") {
- // ok
- } else {
- s.errorf("nil Pkg")
- }
- }
- if src, syn := fn.Synthetic == "", fn.source != nil; src != syn {
- s.errorf("got fromSource=%t, hasSyntax=%t; want same values", src, syn)
- }
- for i, l := range fn.Locals {
- if l.Parent() != fn {
- s.errorf("Local %s at index %d has wrong parent", l.Name(), i)
- }
- if l.Heap {
- s.errorf("Local %s at index %d has Heap flag set", l.Name(), i)
- }
- }
- // Build the set of valid referrers.
- s.instrs = make(map[Instruction]struct{})
- for _, b := range fn.Blocks {
- for _, instr := range b.Instrs {
- s.instrs[instr] = struct{}{}
- }
- }
- for i, p := range fn.Params {
- if p.Parent() != fn {
- s.errorf("Param %s at index %d has wrong parent", p.Name(), i)
- }
- // Check common suffix of Signature and Params match type.
- if sig := fn.Signature; sig != nil {
- j := i - len(fn.Params) + sig.Params().Len() // index within sig.Params
- if j < 0 {
- continue
- }
- if !types.Identical(p.Type(), sig.Params().At(j).Type()) {
- s.errorf("Param %s at index %d has wrong type (%s, versus %s in Signature)", p.Name(), i, p.Type(), sig.Params().At(j).Type())
-
- }
- }
-
- s.checkReferrerList(p)
- }
- for i, fv := range fn.FreeVars {
- if fv.Parent() != fn {
- s.errorf("FreeVar %s at index %d has wrong parent", fv.Name(), i)
- }
- s.checkReferrerList(fv)
- }
-
- if fn.Blocks != nil && len(fn.Blocks) == 0 {
- // Function _had_ blocks (so it's not external) but
- // they were "optimized" away, even the entry block.
- s.errorf("Blocks slice is non-nil but empty")
- }
- for i, b := range fn.Blocks {
- if b == nil {
- s.warnf("nil *BasicBlock at f.Blocks[%d]", i)
- continue
- }
- s.checkBlock(b, i)
- }
-
- s.block = nil
- for i, anon := range fn.AnonFuncs {
- if anon.Parent() != fn {
- s.errorf("AnonFuncs[%d]=%s but %s.Parent()=%s", i, anon, anon, anon.Parent())
- }
- }
- s.fn = nil
- return !s.insane
-}
-
-// sanityCheckPackage checks invariants of packages upon creation.
-// It does not require that the package is built.
-// Unlike sanityCheck (for functions), it just panics at the first error.
-func sanityCheckPackage(pkg *Package) {
- if pkg.Pkg == nil {
- panic(fmt.Sprintf("Package %s has no Object", pkg))
- }
- _ = pkg.String() // must not crash
-
- for name, mem := range pkg.Members {
- if name != mem.Name() {
- panic(fmt.Sprintf("%s: %T.Name() = %s, want %s",
- pkg.Pkg.Path(), mem, mem.Name(), name))
- }
- obj := mem.Object()
- if obj == nil {
- // This check is sound because fields
- // {Global,Function}.object have type
- // types.Object. (If they were declared as
- // *types.{Var,Func}, we'd have a non-empty
- // interface containing a nil pointer.)
-
- continue // not all members have typechecker objects
- }
- if obj.Name() != name {
- if obj.Name() == "init" && strings.HasPrefix(mem.Name(), "init#") {
- // Ok. The name of a declared init function varies between
- // its types.Func ("init") and its ir.Function ("init#%d").
- } else {
- panic(fmt.Sprintf("%s: %T.Object().Name() = %s, want %s",
- pkg.Pkg.Path(), mem, obj.Name(), name))
- }
- }
- }
-}
diff --git a/vendor/honnef.co/go/tools/ir/source.go b/vendor/honnef.co/go/tools/ir/source.go
deleted file mode 100644
index 93d1ccbd290..00000000000
--- a/vendor/honnef.co/go/tools/ir/source.go
+++ /dev/null
@@ -1,270 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file defines utilities for working with source positions
-// or source-level named entities ("objects").
-
-// TODO(adonovan): test that {Value,Instruction}.Pos() positions match
-// the originating syntax, as specified.
-
-import (
- "go/ast"
- "go/token"
- "go/types"
-)
-
-// EnclosingFunction returns the function that contains the syntax
-// node denoted by path.
-//
-// Syntax associated with package-level variable specifications is
-// enclosed by the package's init() function.
-//
-// Returns nil if not found; reasons might include:
-// - the node is not enclosed by any function.
-// - the node is within an anonymous function (FuncLit) and
-// its IR function has not been created yet
-// (pkg.Build() has not yet been called).
-//
-func EnclosingFunction(pkg *Package, path []ast.Node) *Function {
- // Start with package-level function...
- fn := findEnclosingPackageLevelFunction(pkg, path)
- if fn == nil {
- return nil // not in any function
- }
-
- // ...then walk down the nested anonymous functions.
- n := len(path)
-outer:
- for i := range path {
- if lit, ok := path[n-1-i].(*ast.FuncLit); ok {
- for _, anon := range fn.AnonFuncs {
- if anon.Pos() == lit.Type.Func {
- fn = anon
- continue outer
- }
- }
- // IR function not found:
- // - package not yet built, or maybe
- // - builder skipped FuncLit in dead block
- // (in principle; but currently the Builder
- // generates even dead FuncLits).
- return nil
- }
- }
- return fn
-}
-
-// HasEnclosingFunction returns true if the AST node denoted by path
-// is contained within the declaration of some function or
-// package-level variable.
-//
-// Unlike EnclosingFunction, the behaviour of this function does not
-// depend on whether IR code for pkg has been built, so it can be
-// used to quickly reject check inputs that will cause
-// EnclosingFunction to fail, prior to IR building.
-//
-func HasEnclosingFunction(pkg *Package, path []ast.Node) bool {
- return findEnclosingPackageLevelFunction(pkg, path) != nil
-}
-
-// findEnclosingPackageLevelFunction returns the Function
-// corresponding to the package-level function enclosing path.
-//
-func findEnclosingPackageLevelFunction(pkg *Package, path []ast.Node) *Function {
- if n := len(path); n >= 2 { // [... {Gen,Func}Decl File]
- switch decl := path[n-2].(type) {
- case *ast.GenDecl:
- if decl.Tok == token.VAR && n >= 3 {
- // Package-level 'var' initializer.
- return pkg.init
- }
-
- case *ast.FuncDecl:
- // Declared function/method.
- fn := findNamedFunc(pkg, decl.Pos())
- if fn == nil && decl.Recv == nil && decl.Name.Name == "init" {
- // Hack: return non-nil when IR is not yet
- // built so that HasEnclosingFunction works.
- return pkg.init
- }
- return fn
- }
- }
- return nil // not in any function
-}
-
-// findNamedFunc returns the named function whose FuncDecl.Ident is at
-// position pos.
-//
-func findNamedFunc(pkg *Package, pos token.Pos) *Function {
- for _, fn := range pkg.Functions {
- if fn.Pos() == pos {
- return fn
- }
- }
- return nil
-}
-
-// ValueForExpr returns the IR Value that corresponds to non-constant
-// expression e.
-//
-// It returns nil if no value was found, e.g.
-// - the expression is not lexically contained within f;
-// - f was not built with debug information; or
-// - e is a constant expression. (For efficiency, no debug
-// information is stored for constants. Use
-// go/types.Info.Types[e].Value instead.)
-// - e is a reference to nil or a built-in function.
-// - the value was optimised away.
-//
-// If e is an addressable expression used in an lvalue context,
-// value is the address denoted by e, and isAddr is true.
-//
-// The types of e (or &e, if isAddr) and the result are equal
-// (modulo "untyped" bools resulting from comparisons).
-//
-// (Tip: to find the ir.Value given a source position, use
-// astutil.PathEnclosingInterval to locate the ast.Node, then
-// EnclosingFunction to locate the Function, then ValueForExpr to find
-// the ir.Value.)
-//
-func (f *Function) ValueForExpr(e ast.Expr) (value Value, isAddr bool) {
- if f.debugInfo() { // (opt)
- e = unparen(e)
- for _, b := range f.Blocks {
- for _, instr := range b.Instrs {
- if ref, ok := instr.(*DebugRef); ok {
- if ref.Expr == e {
- return ref.X, ref.IsAddr
- }
- }
- }
- }
- }
- return
-}
-
-// --- Lookup functions for source-level named entities (types.Objects) ---
-
-// Package returns the IR Package corresponding to the specified
-// type-checker package object.
-// It returns nil if no such IR package has been created.
-//
-func (prog *Program) Package(obj *types.Package) *Package {
- return prog.packages[obj]
-}
-
-// packageLevelValue returns the package-level value corresponding to
-// the specified named object, which may be a package-level const
-// (*Const), var (*Global) or func (*Function) of some package in
-// prog. It returns nil if the object is not found.
-//
-func (prog *Program) packageLevelValue(obj types.Object) Value {
- if pkg, ok := prog.packages[obj.Pkg()]; ok {
- return pkg.values[obj]
- }
- return nil
-}
-
-// FuncValue returns the concrete Function denoted by the source-level
-// named function obj, or nil if obj denotes an interface method.
-//
-// TODO(adonovan): check the invariant that obj.Type() matches the
-// result's Signature, both in the params/results and in the receiver.
-//
-func (prog *Program) FuncValue(obj *types.Func) *Function {
- fn, _ := prog.packageLevelValue(obj).(*Function)
- return fn
-}
-
-// ConstValue returns the IR Value denoted by the source-level named
-// constant obj.
-//
-func (prog *Program) ConstValue(obj *types.Const) *Const {
- // TODO(adonovan): opt: share (don't reallocate)
- // Consts for const objects and constant ast.Exprs.
-
- // Universal constant? {true,false,nil}
- if obj.Parent() == types.Universe {
- return NewConst(obj.Val(), obj.Type())
- }
- // Package-level named constant?
- if v := prog.packageLevelValue(obj); v != nil {
- return v.(*Const)
- }
- return NewConst(obj.Val(), obj.Type())
-}
-
-// VarValue returns the IR Value that corresponds to a specific
-// identifier denoting the source-level named variable obj.
-//
-// VarValue returns nil if a local variable was not found, perhaps
-// because its package was not built, the debug information was not
-// requested during IR construction, or the value was optimized away.
-//
-// ref is the path to an ast.Ident (e.g. from PathEnclosingInterval),
-// and that ident must resolve to obj.
-//
-// pkg is the package enclosing the reference. (A reference to a var
-// always occurs within a function, so we need to know where to find it.)
-//
-// If the identifier is a field selector and its base expression is
-// non-addressable, then VarValue returns the value of that field.
-// For example:
-// func f() struct {x int}
-// f().x // VarValue(x) returns a *Field instruction of type int
-//
-// All other identifiers denote addressable locations (variables).
-// For them, VarValue may return either the variable's address or its
-// value, even when the expression is evaluated only for its value; the
-// situation is reported by isAddr, the second component of the result.
-//
-// If !isAddr, the returned value is the one associated with the
-// specific identifier. For example,
-// var x int // VarValue(x) returns Const 0 here
-// x = 1 // VarValue(x) returns Const 1 here
-//
-// It is not specified whether the value or the address is returned in
-// any particular case, as it may depend upon optimizations performed
-// during IR code generation, such as registerization, constant
-// folding, avoidance of materialization of subexpressions, etc.
-//
-func (prog *Program) VarValue(obj *types.Var, pkg *Package, ref []ast.Node) (value Value, isAddr bool) {
- // All references to a var are local to some function, possibly init.
- fn := EnclosingFunction(pkg, ref)
- if fn == nil {
- return // e.g. def of struct field; IR not built?
- }
-
- id := ref[0].(*ast.Ident)
-
- // Defining ident of a parameter?
- if id.Pos() == obj.Pos() {
- for _, param := range fn.Params {
- if param.Object() == obj {
- return param, false
- }
- }
- }
-
- // Other ident?
- for _, b := range fn.Blocks {
- for _, instr := range b.Instrs {
- if dr, ok := instr.(*DebugRef); ok {
- if dr.Pos() == id.Pos() {
- return dr.X, dr.IsAddr
- }
- }
- }
- }
-
- // Defining ident of package-level var?
- if v := prog.packageLevelValue(obj); v != nil {
- return v.(*Global), true
- }
-
- return // e.g. debug info not requested, or var optimized away
-}
diff --git a/vendor/honnef.co/go/tools/ir/ssa.go b/vendor/honnef.co/go/tools/ir/ssa.go
deleted file mode 100644
index 49693045f0b..00000000000
--- a/vendor/honnef.co/go/tools/ir/ssa.go
+++ /dev/null
@@ -1,1856 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This package defines a high-level intermediate representation for
-// Go programs using static single-information (SSI) form.
-
-import (
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
- "sync"
-
- "golang.org/x/tools/go/types/typeutil"
-)
-
-type ID int
-
-// A Program is a partial or complete Go program converted to IR form.
-type Program struct {
- Fset *token.FileSet // position information for the files of this Program
- PrintFunc string // create ir.html for function specified in PrintFunc
- imported map[string]*Package // all importable Packages, keyed by import path
- packages map[*types.Package]*Package // all loaded Packages, keyed by object
- mode BuilderMode // set of mode bits for IR construction
- MethodSets typeutil.MethodSetCache // cache of type-checker's method-sets
-
- methodsMu sync.Mutex // guards the following maps:
- methodSets typeutil.Map // maps type to its concrete methodSet
- runtimeTypes typeutil.Map // types for which rtypes are needed
- canon typeutil.Map // type canonicalization map
- bounds map[*types.Func]*Function // bounds for curried x.Method closures
- thunks map[selectionKey]*Function // thunks for T.Method expressions
-}
-
-// A Package is a single analyzed Go package containing Members for
-// all package-level functions, variables, constants and types it
-// declares. These may be accessed directly via Members, or via the
-// type-specific accessor methods Func, Type, Var and Const.
-//
-// Members also contains entries for "init" (the synthetic package
-// initializer) and "init#%d", the nth declared init function,
-// and unspecified other things too.
-//
-type Package struct {
- Prog *Program // the owning program
- Pkg *types.Package // the corresponding go/types.Package
- Members map[string]Member // all package members keyed by name (incl. init and init#%d)
- Functions []*Function // all functions, excluding anonymous ones
- values map[types.Object]Value // package members (incl. types and methods), keyed by object
- init *Function // Func("init"); the package's init function
- debug bool // include full debug info in this package
- printFunc string // which function to print in HTML form
-
- // The following fields are set transiently, then cleared
- // after building.
- buildOnce sync.Once // ensures package building occurs once
- ninit int32 // number of init functions
- info *types.Info // package type information
- files []*ast.File // package ASTs
-}
-
-// A Member is a member of a Go package, implemented by *NamedConst,
-// *Global, *Function, or *Type; they are created by package-level
-// const, var, func and type declarations respectively.
-//
-type Member interface {
- Name() string // declared name of the package member
- String() string // package-qualified name of the package member
- RelString(*types.Package) string // like String, but relative refs are unqualified
- Object() types.Object // typechecker's object for this member, if any
- Type() types.Type // type of the package member
- Token() token.Token // token.{VAR,FUNC,CONST,TYPE}
- Package() *Package // the containing package
-}
-
-// A Type is a Member of a Package representing a package-level named type.
-type Type struct {
- object *types.TypeName
- pkg *Package
-}
-
-// A NamedConst is a Member of a Package representing a package-level
-// named constant.
-//
-// Pos() returns the position of the declaring ast.ValueSpec.Names[*]
-// identifier.
-//
-// NB: a NamedConst is not a Value; it contains a constant Value, which
-// it augments with the name and position of its 'const' declaration.
-//
-type NamedConst struct {
- object *types.Const
- Value *Const
- pkg *Package
-}
-
-// A Value is an IR value that can be referenced by an instruction.
-type Value interface {
- setID(ID)
-
- // Name returns the name of this value, and determines how
- // this Value appears when used as an operand of an
- // Instruction.
- //
- // This is the same as the source name for Parameters,
- // Builtins, Functions, FreeVars, Globals.
- // For constants, it is a representation of the constant's value
- // and type. For all other Values this is the name of the
- // virtual register defined by the instruction.
- //
- // The name of an IR Value is not semantically significant,
- // and may not even be unique within a function.
- Name() string
-
- // ID returns the ID of this value. IDs are unique within a single
- // function and are densely numbered, but may contain gaps.
- // Values and other Instructions share the same ID space.
- // Globally, values are identified by their addresses. However,
- // IDs exist to facilitate efficient storage of mappings between
- // values and data when analysing functions.
- //
- // NB: IDs are allocated late in the IR construction process and
- // are not available to early stages of said process.
- ID() ID
-
- // If this value is an Instruction, String returns its
- // disassembled form; otherwise it returns unspecified
- // human-readable information about the Value, such as its
- // kind, name and type.
- String() string
-
- // Type returns the type of this value. Many instructions
- // (e.g. IndexAddr) change their behaviour depending on the
- // types of their operands.
- Type() types.Type
-
- // Parent returns the function to which this Value belongs.
- // It returns nil for named Functions, Builtin and Global.
- Parent() *Function
-
- // Referrers returns the list of instructions that have this
- // value as one of their operands; it may contain duplicates
- // if an instruction has a repeated operand.
- //
- // Referrers actually returns a pointer through which the
- // caller may perform mutations to the object's state.
- //
- // Referrers is currently only defined if Parent()!=nil,
- // i.e. for the function-local values FreeVar, Parameter,
- // Functions (iff anonymous) and all value-defining instructions.
- // It returns nil for named Functions, Builtin and Global.
- //
- // Instruction.Operands contains the inverse of this relation.
- Referrers() *[]Instruction
-
- Operands(rands []*Value) []*Value // nil for non-Instructions
-
- // Source returns the AST node responsible for creating this
- // value. A single AST node may be responsible for more than one
- // value, and not all values have an associated AST node.
- //
- // Do not use this method to find a Value given an ast.Expr; use
- // ValueForExpr instead.
- Source() ast.Node
-
- // Pos returns Source().Pos() if Source is not nil, else it
- // returns token.NoPos.
- Pos() token.Pos
-}
-
-// An Instruction is an IR instruction that computes a new Value or
-// has some effect.
-//
-// An Instruction that defines a value (e.g. BinOp) also implements
-// the Value interface; an Instruction that only has an effect (e.g. Store)
-// does not.
-//
-type Instruction interface {
- setSource(ast.Node)
- setID(ID)
-
- // String returns the disassembled form of this value.
- //
- // Examples of Instructions that are Values:
- // "BinOp {+} t1 t2" (BinOp)
- // "Call len t1" (Call)
- // Note that the name of the Value is not printed.
- //
- // Examples of Instructions that are not Values:
- // "Return t1" (Return)
- // "Store {int} t2 t1" (Store)
- //
- // (The separation of Value.Name() from Value.String() is useful
- // for some analyses which distinguish the operation from the
- // value it defines, e.g., 'y = local int' is both an allocation
- // of memory 'local int' and a definition of a pointer y.)
- String() string
-
- // ID returns the ID of this instruction. IDs are unique within a single
- // function and are densely numbered, but may contain gaps.
- // Globally, instructions are identified by their addresses. However,
- // IDs exist to facilitate efficient storage of mappings between
- // instructions and data when analysing functions.
- //
- // NB: IDs are allocated late in the IR construction process and
- // are not available to early stages of said process.
- ID() ID
-
- // Parent returns the function to which this instruction
- // belongs.
- Parent() *Function
-
- // Block returns the basic block to which this instruction
- // belongs.
- Block() *BasicBlock
-
- // setBlock sets the basic block to which this instruction belongs.
- setBlock(*BasicBlock)
-
- // Operands returns the operands of this instruction: the
- // set of Values it references.
- //
- // Specifically, it appends their addresses to rands, a
- // user-provided slice, and returns the resulting slice,
- // permitting avoidance of memory allocation.
- //
- // The operands are appended in undefined order, but the order
- // is consistent for a given Instruction; the addresses are
- // always non-nil but may point to a nil Value. Clients may
- // store through the pointers, e.g. to effect a value
- // renaming.
- //
- // Value.Referrers is a subset of the inverse of this
- // relation. (Referrers are not tracked for all types of
- // Values.)
- Operands(rands []*Value) []*Value
-
- Referrers() *[]Instruction // nil for non-Values
-
- // Source returns the AST node responsible for creating this
- // instruction. A single AST node may be responsible for more than
- // one instruction, and not all instructions have an associated
- // AST node.
- Source() ast.Node
-
- // Pos returns Source().Pos() if Source is not nil, else it
- // returns token.NoPos.
- Pos() token.Pos
-}
-
-// A Node is a node in the IR value graph. Every concrete type that
-// implements Node is also either a Value, an Instruction, or both.
-//
-// Node contains the methods common to Value and Instruction, plus the
-// Operands and Referrers methods generalized to return nil for
-// non-Instructions and non-Values, respectively.
-//
-// Node is provided to simplify IR graph algorithms. Clients should
-// use the more specific and informative Value or Instruction
-// interfaces where appropriate.
-//
-type Node interface {
- setID(ID)
-
- // Common methods:
- ID() ID
- String() string
- Source() ast.Node
- Pos() token.Pos
- Parent() *Function
-
- // Partial methods:
- Operands(rands []*Value) []*Value // nil for non-Instructions
- Referrers() *[]Instruction // nil for non-Values
-}
-
-// Function represents the parameters, results, and code of a function
-// or method.
-//
-// If Blocks is nil, this indicates an external function for which no
-// Go source code is available. In this case, FreeVars and Locals
-// are nil too. Clients performing whole-program analysis must
-// handle external functions specially.
-//
-// Blocks contains the function's control-flow graph (CFG).
-// Blocks[0] is the function entry point; block order is not otherwise
-// semantically significant, though it may affect the readability of
-// the disassembly.
-// To iterate over the blocks in dominance order, use DomPreorder().
-//
-// A nested function (Parent()!=nil) that refers to one or more
-// lexically enclosing local variables ("free variables") has FreeVars.
-// Such functions cannot be called directly but require a
-// value created by MakeClosure which, via its Bindings, supplies
-// values for these parameters.
-//
-// If the function is a method (Signature.Recv() != nil) then the first
-// element of Params is the receiver parameter.
-//
-// A Go package may declare many functions called "init".
-// For each one, Object().Name() returns "init" but Name() returns
-// "init#1", etc, in declaration order.
-//
-// Pos() returns the declaring ast.FuncLit.Type.Func or the position
-// of the ast.FuncDecl.Name, if the function was explicit in the
-// source. Synthetic wrappers, for which Synthetic != "", may share
-// the same position as the function they wrap.
-// Syntax.Pos() always returns the position of the declaring "func" token.
-//
-// Type() returns the function's Signature.
-//
-type Function struct {
- node
-
- name string
- object types.Object // a declared *types.Func or one of its wrappers
- method *types.Selection // info about provenance of synthetic methods
- Signature *types.Signature
-
- Synthetic string // provenance of synthetic function; "" for true source functions
- parent *Function // enclosing function if anon; nil if global
- Pkg *Package // enclosing package; nil for shared funcs (wrappers and error.Error)
- Prog *Program // enclosing program
- Params []*Parameter // function parameters; for methods, includes receiver
- FreeVars []*FreeVar // free variables whose values must be supplied by closure
- Locals []*Alloc // local variables of this function
- Blocks []*BasicBlock // basic blocks of the function; nil => external
- Exit *BasicBlock // The function's exit block
- AnonFuncs []*Function // anonymous functions directly beneath this one
- referrers []Instruction // referring instructions (iff Parent() != nil)
- WillExit bool // Calling this function will always terminate the process
- WillUnwind bool // Calling this function will always unwind (it will call runtime.Goexit or panic)
-
- *functionBody
-}
-
-type functionBody struct {
- // The following fields are set transiently during building,
- // then cleared.
- currentBlock *BasicBlock // where to emit code
- objects map[types.Object]Value // addresses of local variables
- namedResults []*Alloc // tuple of named results
- implicitResults []*Alloc // tuple of results
- targets *targets // linked stack of branch targets
- lblocks map[*ast.Object]*lblock // labelled blocks
- consts []*Const
- wr *HTMLWriter
- fakeExits BlockSet
- blocksets [5]BlockSet
- hasDefer bool
-}
-
-func (fn *Function) results() []*Alloc {
- if len(fn.namedResults) > 0 {
- return fn.namedResults
- }
- return fn.implicitResults
-}
-
-// BasicBlock represents an IR basic block.
-//
-// The final element of Instrs is always an explicit transfer of
-// control (If, Jump, Return, Panic, or Unreachable).
-//
-// A block may contain no Instructions only if it is unreachable,
-// i.e., Preds is nil. Empty blocks are typically pruned.
-//
-// BasicBlocks and their Preds/Succs relation form a (possibly cyclic)
-// graph independent of the IR Value graph: the control-flow graph or
-// CFG. It is illegal for multiple edges to exist between the same
-// pair of blocks.
-//
-// Each BasicBlock is also a node in the dominator tree of the CFG.
-// The tree may be navigated using Idom()/Dominees() and queried using
-// Dominates().
-//
-// The order of Preds and Succs is significant (to Phi and If
-// instructions, respectively).
-//
-type BasicBlock struct {
- Index int // index of this block within Parent().Blocks
- Comment string // optional label; no semantic significance
- parent *Function // parent function
- Instrs []Instruction // instructions in order
- Preds, Succs []*BasicBlock // predecessors and successors
- succs2 [2]*BasicBlock // initial space for Succs
- dom domInfo // dominator tree info
- pdom domInfo // post-dominator tree info
- post int
- gaps int // number of nil Instrs (transient)
- rundefers int // number of rundefers (transient)
-}
-
-// Pure values ----------------------------------------
-
-// A FreeVar represents a free variable of the function to which it
-// belongs.
-//
-// FreeVars are used to implement anonymous functions, whose free
-// variables are lexically captured in a closure formed by
-// MakeClosure. The value of such a free var is an Alloc or another
-// FreeVar and is considered a potentially escaping heap address, with
-// pointer type.
-//
-// FreeVars are also used to implement bound method closures. Such a
-// free var represents the receiver value and may be of any type that
-// has concrete methods.
-//
-// Pos() returns the position of the value that was captured, which
-// belongs to an enclosing function.
-//
-type FreeVar struct {
- node
-
- name string
- typ types.Type
- parent *Function
- referrers []Instruction
-
- // Transiently needed during building.
- outer Value // the Value captured from the enclosing context.
-}
-
-// A Parameter represents an input parameter of a function.
-//
-type Parameter struct {
- register
-
- name string
- object types.Object // a *types.Var; nil for non-source locals
-}
-
-// A Const represents the value of a constant expression.
-//
-// The underlying type of a constant may be any boolean, numeric, or
-// string type. In addition, a Const may represent the nil value of
-// any reference type---interface, map, channel, pointer, slice, or
-// function---but not "untyped nil".
-//
-// All source-level constant expressions are represented by a Const
-// of the same type and value.
-//
-// Value holds the exact value of the constant, independent of its
-// Type(), using the same representation as package go/constant uses for
-// constants, or nil for a typed nil value.
-//
-// Pos() returns token.NoPos.
-//
-// Example printed form:
-// Const {42}
-// Const {"test"}
-// Const {(3 + 4i)}
-//
-type Const struct {
- register
-
- Value constant.Value
-}
-
-// A Global is a named Value holding the address of a package-level
-// variable.
-//
-// Pos() returns the position of the ast.ValueSpec.Names[*]
-// identifier.
-//
-type Global struct {
- node
-
- name string
- object types.Object // a *types.Var; may be nil for synthetics e.g. init$guard
- typ types.Type
-
- Pkg *Package
-}
-
-// A Builtin represents a specific use of a built-in function, e.g. len.
-//
-// Builtins are immutable values. Builtins do not have addresses.
-// Builtins can only appear in CallCommon.Func.
-//
-// Name() indicates the function: one of the built-in functions from the
-// Go spec (excluding "make" and "new") or one of these ir-defined
-// intrinsics:
-//
-// // wrapnilchk returns ptr if non-nil, panics otherwise.
-// // (For use in indirection wrappers.)
-// func ir:wrapnilchk(ptr *T, recvType, methodName string) *T
-//
-// Object() returns a *types.Builtin for built-ins defined by the spec,
-// nil for others.
-//
-// Type() returns a *types.Signature representing the effective
-// signature of the built-in for this call.
-//
-type Builtin struct {
- node
-
- name string
- sig *types.Signature
-}
-
-// Value-defining instructions ----------------------------------------
-
-// The Alloc instruction reserves space for a variable of the given type,
-// zero-initializes it, and yields its address.
-//
-// Alloc values are always addresses, and have pointer types, so the
-// type of the allocated variable is actually
-// Type().Underlying().(*types.Pointer).Elem().
-//
-// If Heap is false, Alloc allocates space in the function's
-// activation record (frame); we refer to an Alloc(Heap=false) as a
-// "stack" alloc. Each stack Alloc returns the same address each time
-// it is executed within the same activation; the space is
-// re-initialized to zero.
-//
-// If Heap is true, Alloc allocates space in the heap; we
-// refer to an Alloc(Heap=true) as a "heap" alloc. Each heap Alloc
-// returns a different address each time it is executed.
-//
-// When Alloc is applied to a channel, map or slice type, it returns
-// the address of an uninitialized (nil) reference of that kind; store
-// the result of MakeSlice, MakeMap or MakeChan in that location to
-// instantiate these types.
-//
-// Pos() returns the ast.CompositeLit.Lbrace for a composite literal,
-// or the ast.CallExpr.Rparen for a call to new() or for a call that
-// allocates a varargs slice.
-//
-// Example printed form:
-// t1 = StackAlloc <*int>
-// t2 = HeapAlloc <*int> (new)
-//
-type Alloc struct {
- register
- Heap bool
- index int // dense numbering; for lifting
-}
-
-var _ Instruction = (*Sigma)(nil)
-var _ Value = (*Sigma)(nil)
-
-// The Sigma instruction represents an SSI σ-node, which splits values
-// at branches in the control flow.
-//
-// Conceptually, σ-nodes exist at the end of blocks that branch and
-// constitute parallel assignments to one value per destination block.
-// However, such a representation would be awkward to work with, so
-// instead we place σ-nodes at the beginning of branch targets. The
-// From field denotes to which incoming edge the node applies.
-//
-// Within a block, all σ-nodes must appear before all non-σ nodes.
-//
-// Example printed form:
-// t2 = Sigma [#0] t1 (x)
-//
-type Sigma struct {
- register
- From *BasicBlock
- X Value
-
- live bool // used during lifting
-}
-
-// The Phi instruction represents an SSA φ-node, which combines values
-// that differ across incoming control-flow edges and yields a new
-// value. Within a block, all φ-nodes must appear before all non-φ, non-σ
-// nodes.
-//
-// Pos() returns the position of the && or || for short-circuit
-// control-flow joins, or that of the *Alloc for φ-nodes inserted
-// during SSA renaming.
-//
-// Example printed form:
-// t3 = Phi 2:t1 4:t2 (x)
-//
-type Phi struct {
- register
- Edges []Value // Edges[i] is value for Block().Preds[i]
-
- live bool // used during lifting
-}
-
-// The Call instruction represents a function or method call.
-//
-// The Call instruction yields the function result if there is exactly
-// one. Otherwise it returns a tuple, the components of which are
-// accessed via Extract.
-//
-// See CallCommon for generic function call documentation.
-//
-// Pos() returns the ast.CallExpr.Lparen, if explicit in the source.
-//
-// Example printed form:
-// t3 = Call <()> println t1 t2
-// t4 = Call <()> foo$1
-// t6 = Invoke t5.String
-//
-type Call struct {
- register
- Call CallCommon
-}
-
-// The BinOp instruction yields the result of binary operation X Op Y.
-//
-// Pos() returns the ast.BinaryExpr.OpPos, if explicit in the source.
-//
-// Example printed form:
-// t3 = BinOp {+} t2 t1
-//
-type BinOp struct {
- register
- // One of:
- // ADD SUB MUL QUO REM + - * / %
- // AND OR XOR SHL SHR AND_NOT & | ^ << >> &^
- // EQL NEQ LSS LEQ GTR GEQ == != < <= < >=
- Op token.Token
- X, Y Value
-}
-
-// The UnOp instruction yields the result of Op X.
-// XOR is bitwise complement.
-// SUB is negation.
-// NOT is logical negation.
-//
-//
-// Example printed form:
-// t2 = UnOp {^} t1
-//
-type UnOp struct {
- register
- Op token.Token // One of: NOT SUB XOR ! - ^
- X Value
-}
-
-// The Load instruction loads a value from a memory address.
-//
-// For implicit memory loads, Pos() returns the position of the
-// most closely associated source-level construct; the details are not
-// specified.
-//
-// Example printed form:
-// t2 = Load t1
-//
-type Load struct {
- register
- X Value
-}
-
-// The ChangeType instruction applies to X a value-preserving type
-// change to Type().
-//
-// Type changes are permitted:
-// - between a named type and its underlying type.
-// - between two named types of the same underlying type.
-// - between (possibly named) pointers to identical base types.
-// - from a bidirectional channel to a read- or write-channel,
-// optionally adding/removing a name.
-//
-// This operation cannot fail dynamically.
-//
-// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
-// from an explicit conversion in the source.
-//
-// Example printed form:
-// t2 = ChangeType <*T> t1
-//
-type ChangeType struct {
- register
- X Value
-}
-
-// The Convert instruction yields the conversion of value X to type
-// Type(). One or both of those types is basic (but possibly named).
-//
-// A conversion may change the value and representation of its operand.
-// Conversions are permitted:
-// - between real numeric types.
-// - between complex numeric types.
-// - between string and []byte or []rune.
-// - between pointers and unsafe.Pointer.
-// - between unsafe.Pointer and uintptr.
-// - from (Unicode) integer to (UTF-8) string.
-// A conversion may imply a type name change also.
-//
-// This operation cannot fail dynamically.
-//
-// Conversions of untyped string/number/bool constants to a specific
-// representation are eliminated during IR construction.
-//
-// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
-// from an explicit conversion in the source.
-//
-// Example printed form:
-// t2 = Convert <[]byte> t1
-//
-type Convert struct {
- register
- X Value
-}
-
-// ChangeInterface constructs a value of one interface type from a
-// value of another interface type known to be assignable to it.
-// This operation cannot fail.
-//
-// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
-// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
-// instruction arose from an explicit e.(T) operation; or token.NoPos
-// otherwise.
-//
-// Example printed form:
-// t2 = ChangeInterface t1
-//
-type ChangeInterface struct {
- register
- X Value
-}
-
-// MakeInterface constructs an instance of an interface type from a
-// value of a concrete type.
-//
-// Use Program.MethodSets.MethodSet(X.Type()) to find the method-set
-// of X, and Program.MethodValue(m) to find the implementation of a method.
-//
-// To construct the zero value of an interface type T, use:
-// NewConst(constant.MakeNil(), T, pos)
-//
-// Pos() returns the ast.CallExpr.Lparen, if the instruction arose
-// from an explicit conversion in the source.
-//
-// Example printed form:
-// t2 = MakeInterface t1
-//
-type MakeInterface struct {
- register
- X Value
-}
-
-// The MakeClosure instruction yields a closure value whose code is
-// Fn and whose free variables' values are supplied by Bindings.
-//
-// Type() returns a (possibly named) *types.Signature.
-//
-// Pos() returns the ast.FuncLit.Type.Func for a function literal
-// closure or the ast.SelectorExpr.Sel for a bound method closure.
-//
-// Example printed form:
-// t1 = MakeClosure foo$1 t1 t2
-// t5 = MakeClosure (T).foo$bound t4
-//
-type MakeClosure struct {
- register
- Fn Value // always a *Function
- Bindings []Value // values for each free variable in Fn.FreeVars
-}
-
-// The MakeMap instruction creates a new hash-table-based map object
-// and yields a value of kind map.
-//
-// Type() returns a (possibly named) *types.Map.
-//
-// Pos() returns the ast.CallExpr.Lparen, if created by make(map), or
-// the ast.CompositeLit.Lbrack if created by a literal.
-//
-// Example printed form:
-// t1 = MakeMap
-// t2 = MakeMap t1
-//
-type MakeMap struct {
- register
- Reserve Value // initial space reservation; nil => default
-}
-
-// The MakeChan instruction creates a new channel object and yields a
-// value of kind chan.
-//
-// Type() returns a (possibly named) *types.Chan.
-//
-// Pos() returns the ast.CallExpr.Lparen for the make(chan) that
-// created it.
-//
-// Example printed form:
-// t3 = MakeChan t1
-// t4 = MakeChan t2
-//
-type MakeChan struct {
- register
- Size Value // int; size of buffer; zero => synchronous.
-}
-
-// The MakeSlice instruction yields a slice of length Len backed by a
-// newly allocated array of length Cap.
-//
-// Both Len and Cap must be non-nil Values of integer type.
-//
-// (Alloc(types.Array) followed by Slice will not suffice because
-// Alloc can only create arrays of constant length.)
-//
-// Type() returns a (possibly named) *types.Slice.
-//
-// Pos() returns the ast.CallExpr.Lparen for the make([]T) that
-// created it.
-//
-// Example printed form:
-// t3 = MakeSlice <[]string> t1 t2
-// t4 = MakeSlice t1 t2
-//
-type MakeSlice struct {
- register
- Len Value
- Cap Value
-}
-
-// The Slice instruction yields a slice of an existing string, slice
-// or *array X between optional integer bounds Low and High.
-//
-// Dynamically, this instruction panics if X evaluates to a nil *array
-// pointer.
-//
-// Type() returns string if the type of X was string, otherwise a
-// *types.Slice with the same element type as X.
-//
-// Pos() returns the ast.SliceExpr.Lbrack if created by a x[:] slice
-// operation, the ast.CompositeLit.Lbrace if created by a literal, or
-// NoPos if not explicit in the source (e.g. a variadic argument slice).
-//
-// Example printed form:
-// t4 = Slice <[]int> t3 t2 t1
-//
-type Slice struct {
- register
- X Value // slice, string, or *array
- Low, High, Max Value // each may be nil
-}
-
-// The FieldAddr instruction yields the address of Field of *struct X.
-//
-// The field is identified by its index within the field list of the
-// struct type of X.
-//
-// Dynamically, this instruction panics if X evaluates to a nil
-// pointer.
-//
-// Type() returns a (possibly named) *types.Pointer.
-//
-// Pos() returns the position of the ast.SelectorExpr.Sel for the
-// field, if explicit in the source.
-//
-// Example printed form:
-// t2 = FieldAddr <*int> [0] (X) t1
-//
-type FieldAddr struct {
- register
- X Value // *struct
- Field int // field is X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct).Field(Field)
-}
-
-// The Field instruction yields the Field of struct X.
-//
-// The field is identified by its index within the field list of the
-// struct type of X; by using numeric indices we avoid ambiguity of
-// package-local identifiers and permit compact representations.
-//
-// Pos() returns the position of the ast.SelectorExpr.Sel for the
-// field, if explicit in the source.
-//
-// Example printed form:
-// t2 = FieldAddr [0] (X) t1
-//
-type Field struct {
- register
- X Value // struct
- Field int // index into X.Type().(*types.Struct).Fields
-}
-
-// The IndexAddr instruction yields the address of the element at
-// index Index of collection X. Index is an integer expression.
-//
-// The elements of maps and strings are not addressable; use StringLookup, MapLookup or
-// MapUpdate instead.
-//
-// Dynamically, this instruction panics if X evaluates to a nil *array
-// pointer.
-//
-// Type() returns a (possibly named) *types.Pointer.
-//
-// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
-// explicit in the source.
-//
-// Example printed form:
-// t3 = IndexAddr <*int> t2 t1
-//
-type IndexAddr struct {
- register
- X Value // slice or *array,
- Index Value // numeric index
-}
-
-// The Index instruction yields element Index of array X.
-//
-// Pos() returns the ast.IndexExpr.Lbrack for the index operation, if
-// explicit in the source.
-//
-// Example printed form:
-// t3 = Index t2 t1
-//
-type Index struct {
- register
- X Value // array
- Index Value // integer index
-}
-
-// The MapLookup instruction yields element Index of collection X, a map.
-//
-// If CommaOk, the result is a 2-tuple of the value above and a
-// boolean indicating the result of a map membership test for the key.
-// The components of the tuple are accessed using Extract.
-//
-// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source.
-//
-// Example printed form:
-// t4 = MapLookup t3 t1
-// t6 = MapLookup <(string, bool)> t3 t2
-//
-type MapLookup struct {
- register
- X Value // map
- Index Value // key-typed index
- CommaOk bool // return a value,ok pair
-}
-
-// The StringLookup instruction yields element Index of collection X, a string.
-// Index is an integer expression.
-//
-// Pos() returns the ast.IndexExpr.Lbrack, if explicit in the source.
-//
-// Example printed form:
-// t3 = StringLookup t2 t1
-//
-type StringLookup struct {
- register
- X Value // string
- Index Value // numeric index
-}
-
-// SelectState is a helper for Select.
-// It represents one goal state and its corresponding communication.
-//
-type SelectState struct {
- Dir types.ChanDir // direction of case (SendOnly or RecvOnly)
- Chan Value // channel to use (for send or receive)
- Send Value // value to send (for send)
- Pos token.Pos // position of token.ARROW
- DebugNode ast.Node // ast.SendStmt or ast.UnaryExpr(<-) [debug mode]
-}
-
-// The Select instruction tests whether (or blocks until) one
-// of the specified sent or received states is entered.
-//
-// Let n be the number of States for which Dir==RECV and Tᵢ (0 ≤ i < n)
-// be the element type of each such state's Chan.
-// Select returns an n+2-tuple
-// (index int, recvOk bool, r₀ T₀, ... rₙ-1 Tₙ-1)
-// The tuple's components, described below, must be accessed via the
-// Extract instruction.
-//
-// If Blocking, select waits until exactly one state holds, i.e. a
-// channel becomes ready for the designated operation of sending or
-// receiving; select chooses one among the ready states
-// pseudorandomly, performs the send or receive operation, and sets
-// 'index' to the index of the chosen channel.
-//
-// If !Blocking, select doesn't block if no states hold; instead it
-// returns immediately with index equal to -1.
-//
-// If the chosen channel was used for a receive, the rᵢ component is
-// set to the received value, where i is the index of that state among
-// all n receive states; otherwise rᵢ has the zero value of type Tᵢ.
-// Note that the receive index i is not the same as the state
-// index index.
-//
-// The second component of the triple, recvOk, is a boolean whose value
-// is true iff the selected operation was a receive and the receive
-// successfully yielded a value.
-//
-// Pos() returns the ast.SelectStmt.Select.
-//
-// Example printed form:
-// t6 = SelectNonBlocking <(index int, ok bool, int)> [<-t4, t5<-t1]
-// t11 = SelectBlocking <(index int, ok bool)> []
-//
-type Select struct {
- register
- States []*SelectState
- Blocking bool
-}
-
-// The Range instruction yields an iterator over the domain and range
-// of X, which must be a string or map.
-//
-// Elements are accessed via Next.
-//
-// Type() returns an opaque and degenerate "rangeIter" type.
-//
-// Pos() returns the ast.RangeStmt.For.
-//
-// Example printed form:
-// t2 = Range t1
-//
-type Range struct {
- register
- X Value // string or map
-}
-
-// The Next instruction reads and advances the (map or string)
-// iterator Iter and returns a 3-tuple value (ok, k, v). If the
-// iterator is not exhausted, ok is true and k and v are the next
-// elements of the domain and range, respectively. Otherwise ok is
-// false and k and v are undefined.
-//
-// Components of the tuple are accessed using Extract.
-//
-// The IsString field distinguishes iterators over strings from those
-// over maps, as the Type() alone is insufficient: consider
-// map[int]rune.
-//
-// Type() returns a *types.Tuple for the triple (ok, k, v).
-// The types of k and/or v may be types.Invalid.
-//
-// Example printed form:
-// t5 = Next <(ok bool, k int, v rune)> t2
-// t5 = Next <(ok bool, k invalid type, v invalid type)> t2
-//
-type Next struct {
- register
- Iter Value
- IsString bool // true => string iterator; false => map iterator.
-}
-
-// The TypeAssert instruction tests whether interface value X has type
-// AssertedType.
-//
-// If !CommaOk, on success it returns v, the result of the conversion
-// (defined below); on failure it panics.
-//
-// If CommaOk: on success it returns a pair (v, true) where v is the
-// result of the conversion; on failure it returns (z, false) where z
-// is AssertedType's zero value. The components of the pair must be
-// accessed using the Extract instruction.
-//
-// If AssertedType is a concrete type, TypeAssert checks whether the
-// dynamic type in interface X is equal to it, and if so, the result
-// of the conversion is a copy of the value in the interface.
-//
-// If AssertedType is an interface, TypeAssert checks whether the
-// dynamic type of the interface is assignable to it, and if so, the
-// result of the conversion is a copy of the interface value X.
-// If AssertedType is a superinterface of X.Type(), the operation will
-// fail iff the operand is nil. (Contrast with ChangeInterface, which
-// performs no nil-check.)
-//
-// Type() reflects the actual type of the result, possibly a
-// 2-types.Tuple; AssertedType is the asserted type.
-//
-// Pos() returns the ast.CallExpr.Lparen if the instruction arose from
-// an explicit T(e) conversion; the ast.TypeAssertExpr.Lparen if the
-// instruction arose from an explicit e.(T) operation; or the
-// ast.CaseClause.Case if the instruction arose from a case of a
-// type-switch statement.
-//
-// Example printed form:
-// t2 = TypeAssert t1
-// t4 = TypeAssert <(value fmt.Stringer, ok bool)> t1
-//
-type TypeAssert struct {
- register
- X Value
- AssertedType types.Type
- CommaOk bool
-}
-
-// The Extract instruction yields component Index of Tuple.
-//
-// This is used to access the results of instructions with multiple
-// return values, such as Call, TypeAssert, Next, Recv,
-// MapLookup and others.
-//
-// Example printed form:
-// t7 = Extract [1] (ok) t4
-//
-type Extract struct {
- register
- Tuple Value
- Index int
-}
-
-// Instructions executed for effect. They do not yield a value. --------------------
-
-// The Jump instruction transfers control to the sole successor of its
-// owning block.
-//
-// A Jump must be the last instruction of its containing BasicBlock.
-//
-// Pos() returns NoPos.
-//
-// Example printed form:
-// Jump → b1
-//
-type Jump struct {
- anInstruction
- Comment string
-}
-
-// The Unreachable pseudo-instruction signals that execution cannot
-// continue after the preceding function call because it terminates
-// the process.
-//
-// The instruction acts as a control instruction, jumping to the exit
-// block. However, this jump will never execute.
-//
-// An Unreachable instruction must be the last instruction of its
-// containing BasicBlock.
-//
-// Example printed form:
-// Unreachable → b1
-//
-type Unreachable struct {
- anInstruction
-}
-
-// The If instruction transfers control to one of the two successors
-// of its owning block, depending on the boolean Cond: the first if
-// true, the second if false.
-//
-// An If instruction must be the last instruction of its containing
-// BasicBlock.
-//
-// Pos() returns the *ast.IfStmt, if explicit in the source.
-//
-// Example printed form:
-// If t2 → b1 b2
-//
-type If struct {
- anInstruction
- Cond Value
-}
-
-type ConstantSwitch struct {
- anInstruction
- Tag Value
- // Constant branch conditions. A nil Value denotes the (implicit
- // or explicit) default branch.
- Conds []Value
-}
-
-type TypeSwitch struct {
- register
- Tag Value
- Conds []types.Type
-}
-
-// The Return instruction returns values and control back to the calling
-// function.
-//
-// len(Results) is always equal to the number of results in the
-// function's signature.
-//
-// If len(Results) > 1, Return returns a tuple value with the specified
-// components which the caller must access using Extract instructions.
-//
-// There is no instruction to return a ready-made tuple like those
-// returned by a "value,ok"-mode TypeAssert, MapLookup or Recv or
-// a tail-call to a function with multiple result parameters.
-//
-// Return must be the last instruction of its containing BasicBlock.
-// Such a block has no successors.
-//
-// Pos() returns the ast.ReturnStmt.Return, if explicit in the source.
-//
-// Example printed form:
-// Return
-// Return t1 t2
-//
-type Return struct {
- anInstruction
- Results []Value
-}
-
-// The RunDefers instruction pops and invokes the entire stack of
-// procedure calls pushed by Defer instructions in this function.
-//
-// It is legal to encounter multiple 'rundefers' instructions in a
-// single control-flow path through a function; this is useful in
-// the combined init() function, for example.
-//
-// Pos() returns NoPos.
-//
-// Example printed form:
-// RunDefers
-//
-type RunDefers struct {
- anInstruction
-}
-
-// The Panic instruction initiates a panic with value X.
-//
-// A Panic instruction must be the last instruction of its containing
-// BasicBlock, which must have one successor, the exit block.
-//
-// NB: 'go panic(x)' and 'defer panic(x)' do not use this instruction;
-// they are treated as calls to a built-in function.
-//
-// Pos() returns the ast.CallExpr.Lparen if this panic was explicit
-// in the source.
-//
-// Example printed form:
-// Panic t1
-//
-type Panic struct {
- anInstruction
- X Value // an interface{}
-}
-
-// The Go instruction creates a new goroutine and calls the specified
-// function within it.
-//
-// See CallCommon for generic function call documentation.
-//
-// Pos() returns the ast.GoStmt.Go.
-//
-// Example printed form:
-// Go println t1
-// Go t3
-// GoInvoke t4.Bar t2
-//
-type Go struct {
- anInstruction
- Call CallCommon
-}
-
-// The Defer instruction pushes the specified call onto a stack of
-// functions to be called by a RunDefers instruction or by a panic.
-//
-// See CallCommon for generic function call documentation.
-//
-// Pos() returns the ast.DeferStmt.Defer.
-//
-// Example printed form:
-// Defer println t1
-// Defer t3
-// DeferInvoke t4.Bar t2
-//
-type Defer struct {
- anInstruction
- Call CallCommon
-}
-
-// The Send instruction sends X on channel Chan.
-//
-// Pos() returns the ast.SendStmt.Arrow, if explicit in the source.
-//
-// Example printed form:
-// Send t2 t1
-//
-type Send struct {
- anInstruction
- Chan, X Value
-}
-
-// The Recv instruction receives from channel Chan.
-//
-// If CommaOk, the result is a 2-tuple of the value above
-// and a boolean indicating the success of the receive. The
-// components of the tuple are accessed using Extract.
-//
-// Pos() returns the ast.UnaryExpr.OpPos, if explicit in the source.
-// For receive operations implicit in ranging over a channel,
-// Pos() returns the ast.RangeStmt.For.
-//
-// Example printed form:
-// t2 = Recv t1
-// t3 = Recv <(int, bool)> t1
-type Recv struct {
- register
- Chan Value
- CommaOk bool
-}
-
-// The Store instruction stores Val at address Addr.
-// Stores can be of arbitrary types.
-//
-// Pos() returns the position of the source-level construct most closely
-// associated with the memory store operation.
-// Since implicit memory stores are numerous and varied and depend upon
-// implementation choices, the details are not specified.
-//
-// Example printed form:
-// Store {int} t2 t1
-//
-type Store struct {
- anInstruction
- Addr Value
- Val Value
-}
-
-// The BlankStore instruction is emitted for assignments to the blank
-// identifier.
-//
-// BlankStore is a pseudo-instruction: it has no dynamic effect.
-//
-// Pos() returns NoPos.
-//
-// Example printed form:
-// BlankStore t1
-//
-type BlankStore struct {
- anInstruction
- Val Value
-}
-
-// The MapUpdate instruction updates the association of Map[Key] to
-// Value.
-//
-// Pos() returns the ast.KeyValueExpr.Colon or ast.IndexExpr.Lbrack,
-// if explicit in the source.
-//
-// Example printed form:
-// MapUpdate t3 t1 t2
-//
-type MapUpdate struct {
- anInstruction
- Map Value
- Key Value
- Value Value
-}
-
-// A DebugRef instruction maps a source-level expression Expr to the
-// IR value X that represents the value (!IsAddr) or address (IsAddr)
-// of that expression.
-//
-// DebugRef is a pseudo-instruction: it has no dynamic effect.
-//
-// Pos() returns Expr.Pos(), the start position of the source-level
-// expression. This is not the same as the "designated" token as
-// documented at Value.Pos(). e.g. CallExpr.Pos() does not return the
-// position of the ("designated") Lparen token.
-//
-// DebugRefs are generated only for functions built with debugging
-// enabled; see Package.SetDebugMode() and the GlobalDebug builder
-// mode flag.
-//
-// DebugRefs are not emitted for ast.Idents referring to constants or
-// predeclared identifiers, since they are trivial and numerous.
-// Nor are they emitted for ast.ParenExprs.
-//
-// (By representing these as instructions, rather than out-of-band,
-// consistency is maintained during transformation passes by the
-// ordinary SSA renaming machinery.)
-//
-// Example printed form:
-// ; *ast.CallExpr @ 102:9 is t5
-// ; var x float64 @ 109:72 is x
-// ; address of *ast.CompositeLit @ 216:10 is t0
-//
-type DebugRef struct {
- anInstruction
- Expr ast.Expr // the referring expression (never *ast.ParenExpr)
- object types.Object // the identity of the source var/func
- IsAddr bool // Expr is addressable and X is the address it denotes
- X Value // the value or address of Expr
-}
-
-// Embeddable mix-ins and helpers for common parts of other structs. -----------
-
-// register is a mix-in embedded by all IR values that are also
-// instructions, i.e. virtual registers, and provides a uniform
-// implementation of most of the Value interface: Value.Name() is a
-// numbered register (e.g. "t0"); the other methods are field accessors.
-//
-// Temporary names are automatically assigned to each register on
-// completion of building a function in IR form.
-//
-type register struct {
- anInstruction
- typ types.Type // type of virtual register
- referrers []Instruction
-}
-
-type node struct {
- source ast.Node
- id ID
-}
-
-func (n *node) setID(id ID) { n.id = id }
-func (n node) ID() ID { return n.id }
-
-func (n *node) setSource(source ast.Node) { n.source = source }
-func (n *node) Source() ast.Node { return n.source }
-
-func (n *node) Pos() token.Pos {
- if n.source != nil {
- return n.source.Pos()
- }
- return token.NoPos
-}
-
-// anInstruction is a mix-in embedded by all Instructions.
-// It provides the implementations of the Block and setBlock methods.
-type anInstruction struct {
- node
- block *BasicBlock // the basic block of this instruction
-}
-
-// CallCommon is contained by Go, Defer and Call to hold the
-// common parts of a function or method call.
-//
-// Each CallCommon exists in one of two modes, function call and
-// interface method invocation, or "call" and "invoke" for short.
-//
-// 1. "call" mode: when Method is nil (!IsInvoke), a CallCommon
-// represents an ordinary function call of the value in Value,
-// which may be a *Builtin, a *Function or any other value of kind
-// 'func'.
-//
-// Value may be one of:
-// (a) a *Function, indicating a statically dispatched call
-// to a package-level function, an anonymous function, or
-// a method of a named type.
-// (b) a *MakeClosure, indicating an immediately applied
-// function literal with free variables.
-// (c) a *Builtin, indicating a statically dispatched call
-// to a built-in function.
-// (d) any other value, indicating a dynamically dispatched
-// function call.
-// StaticCallee returns the identity of the callee in cases
-// (a) and (b), nil otherwise.
-//
-// Args contains the arguments to the call. If Value is a method,
-// Args[0] contains the receiver parameter.
-//
-// Example printed form:
-// t3 = Call <()> println t1 t2
-// Go t3
-// Defer t3
-//
-// 2. "invoke" mode: when Method is non-nil (IsInvoke), a CallCommon
-// represents a dynamically dispatched call to an interface method.
-// In this mode, Value is the interface value and Method is the
-// interface's abstract method. Note: an abstract method may be
-// shared by multiple interfaces due to embedding; Value.Type()
-// provides the specific interface used for this call.
-//
-// Value is implicitly supplied to the concrete method implementation
-// as the receiver parameter; in other words, Args[0] holds not the
-// receiver but the first true argument.
-//
-// Example printed form:
-// t6 = Invoke t5.String
-// GoInvoke t4.Bar t2
-// DeferInvoke t4.Bar t2
-//
-// For all calls to variadic functions (Signature().Variadic()),
-// the last element of Args is a slice.
-//
-type CallCommon struct {
- Value Value // receiver (invoke mode) or func value (call mode)
- Method *types.Func // abstract method (invoke mode)
- Args []Value // actual parameters (in static method call, includes receiver)
- Results Value
-}
-
-// IsInvoke returns true if this call has "invoke" (not "call") mode.
-func (c *CallCommon) IsInvoke() bool {
- return c.Method != nil
-}
-
-// Signature returns the signature of the called function.
-//
-// For an "invoke"-mode call, the signature of the interface method is
-// returned.
-//
-// In either "call" or "invoke" mode, if the callee is a method, its
-// receiver is represented by sig.Recv, not sig.Params().At(0).
-//
-func (c *CallCommon) Signature() *types.Signature {
- if c.Method != nil {
- return c.Method.Type().(*types.Signature)
- }
- return c.Value.Type().Underlying().(*types.Signature)
-}
-
-// StaticCallee returns the callee if this is a trivially static
-// "call"-mode call to a function.
-func (c *CallCommon) StaticCallee() *Function {
- switch fn := c.Value.(type) {
- case *Function:
- return fn
- case *MakeClosure:
- return fn.Fn.(*Function)
- }
- return nil
-}
-
-// Description returns a description of the mode of this call suitable
-// for a user interface, e.g., "static method call".
-func (c *CallCommon) Description() string {
- switch fn := c.Value.(type) {
- case *Builtin:
- return "built-in function call"
- case *MakeClosure:
- return "static function closure call"
- case *Function:
- if fn.Signature.Recv() != nil {
- return "static method call"
- }
- return "static function call"
- }
- if c.IsInvoke() {
- return "dynamic method call" // ("invoke" mode)
- }
- return "dynamic function call"
-}
-
-// The CallInstruction interface, implemented by *Go, *Defer and *Call,
-// exposes the common parts of function-calling instructions,
-// yet provides a way back to the Value defined by *Call alone.
-//
-type CallInstruction interface {
- Instruction
- Common() *CallCommon // returns the common parts of the call
- Value() *Call
-}
-
-func (s *Call) Common() *CallCommon { return &s.Call }
-func (s *Defer) Common() *CallCommon { return &s.Call }
-func (s *Go) Common() *CallCommon { return &s.Call }
-
-func (s *Call) Value() *Call { return s }
-func (s *Defer) Value() *Call { return nil }
-func (s *Go) Value() *Call { return nil }
-
-func (v *Builtin) Type() types.Type { return v.sig }
-func (v *Builtin) Name() string { return v.name }
-func (*Builtin) Referrers() *[]Instruction { return nil }
-func (v *Builtin) Pos() token.Pos { return token.NoPos }
-func (v *Builtin) Object() types.Object { return types.Universe.Lookup(v.name) }
-func (v *Builtin) Parent() *Function { return nil }
-
-func (v *FreeVar) Type() types.Type { return v.typ }
-func (v *FreeVar) Name() string { return v.name }
-func (v *FreeVar) Referrers() *[]Instruction { return &v.referrers }
-func (v *FreeVar) Parent() *Function { return v.parent }
-
-func (v *Global) Type() types.Type { return v.typ }
-func (v *Global) Name() string { return v.name }
-func (v *Global) Parent() *Function { return nil }
-func (v *Global) Referrers() *[]Instruction { return nil }
-func (v *Global) Token() token.Token { return token.VAR }
-func (v *Global) Object() types.Object { return v.object }
-func (v *Global) String() string { return v.RelString(nil) }
-func (v *Global) Package() *Package { return v.Pkg }
-func (v *Global) RelString(from *types.Package) string { return relString(v, from) }
-
-func (v *Function) Name() string { return v.name }
-func (v *Function) Type() types.Type { return v.Signature }
-func (v *Function) Token() token.Token { return token.FUNC }
-func (v *Function) Object() types.Object { return v.object }
-func (v *Function) String() string { return v.RelString(nil) }
-func (v *Function) Package() *Package { return v.Pkg }
-func (v *Function) Parent() *Function { return v.parent }
-func (v *Function) Referrers() *[]Instruction {
- if v.parent != nil {
- return &v.referrers
- }
- return nil
-}
-
-func (v *Parameter) Object() types.Object { return v.object }
-
-func (v *Alloc) Type() types.Type { return v.typ }
-func (v *Alloc) Referrers() *[]Instruction { return &v.referrers }
-
-func (v *register) Type() types.Type { return v.typ }
-func (v *register) setType(typ types.Type) { v.typ = typ }
-func (v *register) Name() string { return fmt.Sprintf("t%d", v.id) }
-func (v *register) Referrers() *[]Instruction { return &v.referrers }
-
-func (v *anInstruction) Parent() *Function { return v.block.parent }
-func (v *anInstruction) Block() *BasicBlock { return v.block }
-func (v *anInstruction) setBlock(block *BasicBlock) { v.block = block }
-func (v *anInstruction) Referrers() *[]Instruction { return nil }
-
-func (t *Type) Name() string { return t.object.Name() }
-func (t *Type) Pos() token.Pos { return t.object.Pos() }
-func (t *Type) Type() types.Type { return t.object.Type() }
-func (t *Type) Token() token.Token { return token.TYPE }
-func (t *Type) Object() types.Object { return t.object }
-func (t *Type) String() string { return t.RelString(nil) }
-func (t *Type) Package() *Package { return t.pkg }
-func (t *Type) RelString(from *types.Package) string { return relString(t, from) }
-
-func (c *NamedConst) Name() string { return c.object.Name() }
-func (c *NamedConst) Pos() token.Pos { return c.object.Pos() }
-func (c *NamedConst) String() string { return c.RelString(nil) }
-func (c *NamedConst) Type() types.Type { return c.object.Type() }
-func (c *NamedConst) Token() token.Token { return token.CONST }
-func (c *NamedConst) Object() types.Object { return c.object }
-func (c *NamedConst) Package() *Package { return c.pkg }
-func (c *NamedConst) RelString(from *types.Package) string { return relString(c, from) }
-
-// Func returns the package-level function of the specified name,
-// or nil if not found.
-//
-func (p *Package) Func(name string) (f *Function) {
- f, _ = p.Members[name].(*Function)
- return
-}
-
-// Var returns the package-level variable of the specified name,
-// or nil if not found.
-//
-func (p *Package) Var(name string) (g *Global) {
- g, _ = p.Members[name].(*Global)
- return
-}
-
-// Const returns the package-level constant of the specified name,
-// or nil if not found.
-//
-func (p *Package) Const(name string) (c *NamedConst) {
- c, _ = p.Members[name].(*NamedConst)
- return
-}
-
-// Type returns the package-level type of the specified name,
-// or nil if not found.
-//
-func (p *Package) Type(name string) (t *Type) {
- t, _ = p.Members[name].(*Type)
- return
-}
-
-func (s *DebugRef) Pos() token.Pos { return s.Expr.Pos() }
-
-// Operands.
-
-func (v *Alloc) Operands(rands []*Value) []*Value {
- return rands
-}
-
-func (v *BinOp) Operands(rands []*Value) []*Value {
- return append(rands, &v.X, &v.Y)
-}
-
-func (c *CallCommon) Operands(rands []*Value) []*Value {
- rands = append(rands, &c.Value)
- for i := range c.Args {
- rands = append(rands, &c.Args[i])
- }
- return rands
-}
-
-func (s *Go) Operands(rands []*Value) []*Value {
- return s.Call.Operands(rands)
-}
-
-func (s *Call) Operands(rands []*Value) []*Value {
- return s.Call.Operands(rands)
-}
-
-func (s *Defer) Operands(rands []*Value) []*Value {
- return s.Call.Operands(rands)
-}
-
-func (v *ChangeInterface) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (v *ChangeType) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (v *Convert) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (s *DebugRef) Operands(rands []*Value) []*Value {
- return append(rands, &s.X)
-}
-
-func (v *Extract) Operands(rands []*Value) []*Value {
- return append(rands, &v.Tuple)
-}
-
-func (v *Field) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (v *FieldAddr) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (s *If) Operands(rands []*Value) []*Value {
- return append(rands, &s.Cond)
-}
-
-func (s *ConstantSwitch) Operands(rands []*Value) []*Value {
- rands = append(rands, &s.Tag)
- for i := range s.Conds {
- rands = append(rands, &s.Conds[i])
- }
- return rands
-}
-
-func (s *TypeSwitch) Operands(rands []*Value) []*Value {
- rands = append(rands, &s.Tag)
- return rands
-}
-
-func (v *Index) Operands(rands []*Value) []*Value {
- return append(rands, &v.X, &v.Index)
-}
-
-func (v *IndexAddr) Operands(rands []*Value) []*Value {
- return append(rands, &v.X, &v.Index)
-}
-
-func (*Jump) Operands(rands []*Value) []*Value {
- return rands
-}
-
-func (*Unreachable) Operands(rands []*Value) []*Value {
- return rands
-}
-
-func (v *MapLookup) Operands(rands []*Value) []*Value {
- return append(rands, &v.X, &v.Index)
-}
-
-func (v *StringLookup) Operands(rands []*Value) []*Value {
- return append(rands, &v.X, &v.Index)
-}
-
-func (v *MakeChan) Operands(rands []*Value) []*Value {
- return append(rands, &v.Size)
-}
-
-func (v *MakeClosure) Operands(rands []*Value) []*Value {
- rands = append(rands, &v.Fn)
- for i := range v.Bindings {
- rands = append(rands, &v.Bindings[i])
- }
- return rands
-}
-
-func (v *MakeInterface) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (v *MakeMap) Operands(rands []*Value) []*Value {
- return append(rands, &v.Reserve)
-}
-
-func (v *MakeSlice) Operands(rands []*Value) []*Value {
- return append(rands, &v.Len, &v.Cap)
-}
-
-func (v *MapUpdate) Operands(rands []*Value) []*Value {
- return append(rands, &v.Map, &v.Key, &v.Value)
-}
-
-func (v *Next) Operands(rands []*Value) []*Value {
- return append(rands, &v.Iter)
-}
-
-func (s *Panic) Operands(rands []*Value) []*Value {
- return append(rands, &s.X)
-}
-
-func (v *Sigma) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (v *Phi) Operands(rands []*Value) []*Value {
- for i := range v.Edges {
- rands = append(rands, &v.Edges[i])
- }
- return rands
-}
-
-func (v *Range) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (s *Return) Operands(rands []*Value) []*Value {
- for i := range s.Results {
- rands = append(rands, &s.Results[i])
- }
- return rands
-}
-
-func (*RunDefers) Operands(rands []*Value) []*Value {
- return rands
-}
-
-func (v *Select) Operands(rands []*Value) []*Value {
- for i := range v.States {
- rands = append(rands, &v.States[i].Chan, &v.States[i].Send)
- }
- return rands
-}
-
-func (s *Send) Operands(rands []*Value) []*Value {
- return append(rands, &s.Chan, &s.X)
-}
-
-func (recv *Recv) Operands(rands []*Value) []*Value {
- return append(rands, &recv.Chan)
-}
-
-func (v *Slice) Operands(rands []*Value) []*Value {
- return append(rands, &v.X, &v.Low, &v.High, &v.Max)
-}
-
-func (s *Store) Operands(rands []*Value) []*Value {
- return append(rands, &s.Addr, &s.Val)
-}
-
-func (s *BlankStore) Operands(rands []*Value) []*Value {
- return append(rands, &s.Val)
-}
-
-func (v *TypeAssert) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (v *UnOp) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-func (v *Load) Operands(rands []*Value) []*Value {
- return append(rands, &v.X)
-}
-
-// Non-Instruction Values:
-func (v *Builtin) Operands(rands []*Value) []*Value { return rands }
-func (v *FreeVar) Operands(rands []*Value) []*Value { return rands }
-func (v *Const) Operands(rands []*Value) []*Value { return rands }
-func (v *Function) Operands(rands []*Value) []*Value { return rands }
-func (v *Global) Operands(rands []*Value) []*Value { return rands }
-func (v *Parameter) Operands(rands []*Value) []*Value { return rands }
diff --git a/vendor/honnef.co/go/tools/ir/staticcheck.conf b/vendor/honnef.co/go/tools/ir/staticcheck.conf
deleted file mode 100644
index d7b38bc3563..00000000000
--- a/vendor/honnef.co/go/tools/ir/staticcheck.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-# ssa/... is mostly imported from upstream and we don't want to
-# deviate from it too much, hence disabling SA1019
-checks = ["inherit", "-SA1019"]
diff --git a/vendor/honnef.co/go/tools/ir/util.go b/vendor/honnef.co/go/tools/ir/util.go
deleted file mode 100644
index df0f8bf971d..00000000000
--- a/vendor/honnef.co/go/tools/ir/util.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file defines a number of miscellaneous utility functions.
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "io"
- "os"
-
- "golang.org/x/tools/go/ast/astutil"
-)
-
-//// AST utilities
-
-func unparen(e ast.Expr) ast.Expr { return astutil.Unparen(e) }
-
-// isBlankIdent returns true iff e is an Ident with name "_".
-// They have no associated types.Object, and thus no type.
-//
-func isBlankIdent(e ast.Expr) bool {
- id, ok := e.(*ast.Ident)
- return ok && id.Name == "_"
-}
-
-//// Type utilities. Some of these belong in go/types.
-
-// isPointer returns true for types whose underlying type is a pointer.
-func isPointer(typ types.Type) bool {
- _, ok := typ.Underlying().(*types.Pointer)
- return ok
-}
-
-func isInterface(T types.Type) bool { return types.IsInterface(T) }
-
-// deref returns a pointer's element type; otherwise it returns typ.
-func deref(typ types.Type) types.Type {
- if p, ok := typ.Underlying().(*types.Pointer); ok {
- return p.Elem()
- }
- return typ
-}
-
-// recvType returns the receiver type of method obj.
-func recvType(obj *types.Func) types.Type {
- return obj.Type().(*types.Signature).Recv().Type()
-}
-
-// logStack prints the formatted "start" message to stderr and
-// returns a closure that prints the corresponding "end" message.
-// Call using 'defer logStack(...)()' to show builder stack on panic.
-// Don't forget trailing parens!
-//
-func logStack(format string, args ...interface{}) func() {
- msg := fmt.Sprintf(format, args...)
- io.WriteString(os.Stderr, msg)
- io.WriteString(os.Stderr, "\n")
- return func() {
- io.WriteString(os.Stderr, msg)
- io.WriteString(os.Stderr, " end\n")
- }
-}
-
-// newVar creates a 'var' for use in a types.Tuple.
-func newVar(name string, typ types.Type) *types.Var {
- return types.NewParam(token.NoPos, nil, name, typ)
-}
-
-// anonVar creates an anonymous 'var' for use in a types.Tuple.
-func anonVar(typ types.Type) *types.Var {
- return newVar("", typ)
-}
-
-var lenResults = types.NewTuple(anonVar(tInt))
-
-// makeLen returns the len builtin specialized to type func(T)int.
-func makeLen(T types.Type) *Builtin {
- lenParams := types.NewTuple(anonVar(T))
- return &Builtin{
- name: "len",
- sig: types.NewSignature(nil, lenParams, lenResults, false),
- }
-}
diff --git a/vendor/honnef.co/go/tools/ir/wrappers.go b/vendor/honnef.co/go/tools/ir/wrappers.go
deleted file mode 100644
index 7dd33474806..00000000000
--- a/vendor/honnef.co/go/tools/ir/wrappers.go
+++ /dev/null
@@ -1,292 +0,0 @@
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package ir
-
-// This file defines synthesis of Functions that delegate to declared
-// methods; they come in three kinds:
-//
-// (1) wrappers: methods that wrap declared methods, performing
-// implicit pointer indirections and embedded field selections.
-//
-// (2) thunks: funcs that wrap declared methods. Like wrappers,
-// thunks perform indirections and field selections. The thunk's
-// first parameter is used as the receiver for the method call.
-//
-// (3) bounds: funcs that wrap declared methods. The bound's sole
-// free variable, supplied by a closure, is used as the receiver
-// for the method call. No indirections or field selections are
-// performed since they can be done before the call.
-
-import (
- "fmt"
-
- "go/types"
-)
-
-// -- wrappers -----------------------------------------------------------
-
-// makeWrapper returns a synthetic method that delegates to the
-// declared method denoted by meth.Obj(), first performing any
-// necessary pointer indirections or field selections implied by meth.
-//
-// The resulting method's receiver type is meth.Recv().
-//
-// This function is versatile but quite subtle! Consider the
-// following axes of variation when making changes:
-// - optional receiver indirection
-// - optional implicit field selections
-// - meth.Obj() may denote a concrete or an interface method
-// - the result may be a thunk or a wrapper.
-//
-// EXCLUSIVE_LOCKS_REQUIRED(prog.methodsMu)
-//
-func makeWrapper(prog *Program, sel *types.Selection) *Function {
- obj := sel.Obj().(*types.Func) // the declared function
- sig := sel.Type().(*types.Signature) // type of this wrapper
-
- var recv *types.Var // wrapper's receiver or thunk's params[0]
- name := obj.Name()
- var description string
- var start int // first regular param
- if sel.Kind() == types.MethodExpr {
- name += "$thunk"
- description = "thunk"
- recv = sig.Params().At(0)
- start = 1
- } else {
- description = "wrapper"
- recv = sig.Recv()
- }
-
- description = fmt.Sprintf("%s for %s", description, sel.Obj())
- if prog.mode&LogSource != 0 {
- defer logStack("make %s to (%s)", description, recv.Type())()
- }
- fn := &Function{
- name: name,
- method: sel,
- object: obj,
- Signature: sig,
- Synthetic: description,
- Prog: prog,
- functionBody: new(functionBody),
- }
- fn.initHTML(prog.PrintFunc)
- fn.startBody()
- fn.addSpilledParam(recv, nil)
- createParams(fn, start)
-
- indices := sel.Index()
-
- var v Value = fn.Locals[0] // spilled receiver
- if isPointer(sel.Recv()) {
- v = emitLoad(fn, v, nil)
-
- // For simple indirection wrappers, perform an informative nil-check:
- // "value method (T).f called using nil *T pointer"
- if len(indices) == 1 && !isPointer(recvType(obj)) {
- var c Call
- c.Call.Value = &Builtin{
- name: "ir:wrapnilchk",
- sig: types.NewSignature(nil,
- types.NewTuple(anonVar(sel.Recv()), anonVar(tString), anonVar(tString)),
- types.NewTuple(anonVar(sel.Recv())), false),
- }
- c.Call.Args = []Value{
- v,
- emitConst(fn, stringConst(deref(sel.Recv()).String())),
- emitConst(fn, stringConst(sel.Obj().Name())),
- }
- c.setType(v.Type())
- v = fn.emit(&c, nil)
- }
- }
-
- // Invariant: v is a pointer, either
- // value of *A receiver param, or
- // address of A spilled receiver.
-
- // We use pointer arithmetic (FieldAddr possibly followed by
- // Load) in preference to value extraction (Field possibly
- // preceded by Load).
-
- v = emitImplicitSelections(fn, v, indices[:len(indices)-1], nil)
-
- // Invariant: v is a pointer, either
- // value of implicit *C field, or
- // address of implicit C field.
-
- var c Call
- if r := recvType(obj); !isInterface(r) { // concrete method
- if !isPointer(r) {
- v = emitLoad(fn, v, nil)
- }
- c.Call.Value = prog.declaredFunc(obj)
- c.Call.Args = append(c.Call.Args, v)
- } else {
- c.Call.Method = obj
- c.Call.Value = emitLoad(fn, v, nil)
- }
- for _, arg := range fn.Params[1:] {
- c.Call.Args = append(c.Call.Args, arg)
- }
- emitTailCall(fn, &c, nil)
- fn.finishBody()
- return fn
-}
-
-// createParams creates parameters for wrapper method fn based on its
-// Signature.Params, which do not include the receiver.
-// start is the index of the first regular parameter to use.
-//
-func createParams(fn *Function, start int) {
- tparams := fn.Signature.Params()
- for i, n := start, tparams.Len(); i < n; i++ {
- fn.addParamObj(tparams.At(i), nil)
- }
-}
-
-// -- bounds -----------------------------------------------------------
-
-// makeBound returns a bound method wrapper (or "bound"), a synthetic
-// function that delegates to a concrete or interface method denoted
-// by obj. The resulting function has no receiver, but has one free
-// variable which will be used as the method's receiver in the
-// tail-call.
-//
-// Use MakeClosure with such a wrapper to construct a bound method
-// closure. e.g.:
-//
-// type T int or: type T interface { meth() }
-// func (t T) meth()
-// var t T
-// f := t.meth
-// f() // calls t.meth()
-//
-// f is a closure of a synthetic wrapper defined as if by:
-//
-// f := func() { return t.meth() }
-//
-// Unlike makeWrapper, makeBound need perform no indirection or field
-// selections because that can be done before the closure is
-// constructed.
-//
-// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
-//
-func makeBound(prog *Program, obj *types.Func) *Function {
- prog.methodsMu.Lock()
- defer prog.methodsMu.Unlock()
- fn, ok := prog.bounds[obj]
- if !ok {
- description := fmt.Sprintf("bound method wrapper for %s", obj)
- if prog.mode&LogSource != 0 {
- defer logStack("%s", description)()
- }
- fn = &Function{
- name: obj.Name() + "$bound",
- object: obj,
- Signature: changeRecv(obj.Type().(*types.Signature), nil), // drop receiver
- Synthetic: description,
- Prog: prog,
- functionBody: new(functionBody),
- }
- fn.initHTML(prog.PrintFunc)
-
- fv := &FreeVar{name: "recv", typ: recvType(obj), parent: fn}
- fn.FreeVars = []*FreeVar{fv}
- fn.startBody()
- createParams(fn, 0)
- var c Call
-
- if !isInterface(recvType(obj)) { // concrete
- c.Call.Value = prog.declaredFunc(obj)
- c.Call.Args = []Value{fv}
- } else {
- c.Call.Value = fv
- c.Call.Method = obj
- }
- for _, arg := range fn.Params {
- c.Call.Args = append(c.Call.Args, arg)
- }
- emitTailCall(fn, &c, nil)
- fn.finishBody()
-
- prog.bounds[obj] = fn
- }
- return fn
-}
-
-// -- thunks -----------------------------------------------------------
-
-// makeThunk returns a thunk, a synthetic function that delegates to a
-// concrete or interface method denoted by sel.Obj(). The resulting
-// function has no receiver, but has an additional (first) regular
-// parameter.
-//
-// Precondition: sel.Kind() == types.MethodExpr.
-//
-// type T int or: type T interface { meth() }
-// func (t T) meth()
-// f := T.meth
-// var t T
-// f(t) // calls t.meth()
-//
-// f is a synthetic wrapper defined as if by:
-//
-// f := func(t T) { return t.meth() }
-//
-// TODO(adonovan): opt: currently the stub is created even when used
-// directly in a function call: C.f(i, 0). This is less efficient
-// than inlining the stub.
-//
-// EXCLUSIVE_LOCKS_ACQUIRED(meth.Prog.methodsMu)
-//
-func makeThunk(prog *Program, sel *types.Selection) *Function {
- if sel.Kind() != types.MethodExpr {
- panic(sel)
- }
-
- key := selectionKey{
- kind: sel.Kind(),
- recv: sel.Recv(),
- obj: sel.Obj(),
- index: fmt.Sprint(sel.Index()),
- indirect: sel.Indirect(),
- }
-
- prog.methodsMu.Lock()
- defer prog.methodsMu.Unlock()
-
- // Canonicalize key.recv to avoid constructing duplicate thunks.
- canonRecv, ok := prog.canon.At(key.recv).(types.Type)
- if !ok {
- canonRecv = key.recv
- prog.canon.Set(key.recv, canonRecv)
- }
- key.recv = canonRecv
-
- fn, ok := prog.thunks[key]
- if !ok {
- fn = makeWrapper(prog, sel)
- if fn.Signature.Recv() != nil {
- panic(fn) // unexpected receiver
- }
- prog.thunks[key] = fn
- }
- return fn
-}
-
-func changeRecv(s *types.Signature, recv *types.Var) *types.Signature {
- return types.NewSignature(recv, s.Params(), s.Results(), s.Variadic())
-}
-
-// selectionKey is like types.Selection but a usable map key.
-type selectionKey struct {
- kind types.SelectionKind
- recv types.Type // canonicalized via Program.canon
- obj types.Object
- index string
- indirect bool
-}
diff --git a/vendor/honnef.co/go/tools/ir/write.go b/vendor/honnef.co/go/tools/ir/write.go
deleted file mode 100644
index b936bc98528..00000000000
--- a/vendor/honnef.co/go/tools/ir/write.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package ir
-
-func NewJump(parent *BasicBlock) *Jump {
- return &Jump{anInstruction{block: parent}, ""}
-}
diff --git a/vendor/honnef.co/go/tools/lint/LICENSE b/vendor/honnef.co/go/tools/lint/LICENSE
deleted file mode 100644
index 796130a123a..00000000000
--- a/vendor/honnef.co/go/tools/lint/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2013 The Go Authors. All rights reserved.
-Copyright (c) 2016 Dominik Honnef. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/honnef.co/go/tools/lint/lint.go b/vendor/honnef.co/go/tools/lint/lint.go
deleted file mode 100644
index 1a70e0c298e..00000000000
--- a/vendor/honnef.co/go/tools/lint/lint.go
+++ /dev/null
@@ -1,539 +0,0 @@
-// Package lint provides the foundation for tools like staticcheck
-package lint // import "honnef.co/go/tools/lint"
-
-import (
- "bytes"
- "encoding/gob"
- "fmt"
- "go/scanner"
- "go/token"
- "go/types"
- "path/filepath"
- "sort"
- "strings"
- "sync"
- "sync/atomic"
- "unicode"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/packages"
- "honnef.co/go/tools/config"
- "honnef.co/go/tools/internal/cache"
-)
-
-type Documentation struct {
- Title string
- Text string
- Since string
- NonDefault bool
- Options []string
-}
-
-func (doc *Documentation) String() string {
- b := &strings.Builder{}
- fmt.Fprintf(b, "%s\n\n", doc.Title)
- if doc.Text != "" {
- fmt.Fprintf(b, "%s\n\n", doc.Text)
- }
- fmt.Fprint(b, "Available since\n ")
- if doc.Since == "" {
- fmt.Fprint(b, "unreleased")
- } else {
- fmt.Fprintf(b, "%s", doc.Since)
- }
- if doc.NonDefault {
- fmt.Fprint(b, ", non-default")
- }
- fmt.Fprint(b, "\n")
- if len(doc.Options) > 0 {
- fmt.Fprintf(b, "\nOptions\n")
- for _, opt := range doc.Options {
- fmt.Fprintf(b, " %s", opt)
- }
- fmt.Fprint(b, "\n")
- }
- return b.String()
-}
-
-type Ignore interface {
- Match(p Problem) bool
-}
-
-type LineIgnore struct {
- File string
- Line int
- Checks []string
- Matched bool
- Pos token.Position
-}
-
-func (li *LineIgnore) Match(p Problem) bool {
- pos := p.Pos
- if pos.Filename != li.File || pos.Line != li.Line {
- return false
- }
- for _, c := range li.Checks {
- if m, _ := filepath.Match(c, p.Check); m {
- li.Matched = true
- return true
- }
- }
- return false
-}
-
-func (li *LineIgnore) String() string {
- matched := "not matched"
- if li.Matched {
- matched = "matched"
- }
- return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched)
-}
-
-type FileIgnore struct {
- File string
- Checks []string
-}
-
-func (fi *FileIgnore) Match(p Problem) bool {
- if p.Pos.Filename != fi.File {
- return false
- }
- for _, c := range fi.Checks {
- if m, _ := filepath.Match(c, p.Check); m {
- return true
- }
- }
- return false
-}
-
-type Severity uint8
-
-const (
- Error Severity = iota
- Warning
- Ignored
-)
-
-// Problem represents a problem in some source code.
-type Problem struct {
- Pos token.Position
- End token.Position
- Message string
- Check string
- Severity Severity
- Related []Related
-}
-
-type Related struct {
- Pos token.Position
- End token.Position
- Message string
-}
-
-func (p Problem) Equal(o Problem) bool {
- return p.Pos == o.Pos &&
- p.End == o.End &&
- p.Message == o.Message &&
- p.Check == o.Check &&
- p.Severity == o.Severity
-}
-
-func (p *Problem) String() string {
- return fmt.Sprintf("%s (%s)", p.Message, p.Check)
-}
-
-// A Linter lints Go source code.
-type Linter struct {
- Checkers []*analysis.Analyzer
- CumulativeCheckers []CumulativeChecker
- GoVersion int
- Config config.Config
- Stats Stats
- RepeatAnalyzers uint
-}
-
-type CumulativeChecker interface {
- Analyzer() *analysis.Analyzer
- Result() []types.Object
- ProblemObject(*token.FileSet, types.Object) Problem
-}
-
-func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) {
- var allAnalyzers []*analysis.Analyzer
- allAnalyzers = append(allAnalyzers, l.Checkers...)
- for _, cum := range l.CumulativeCheckers {
- allAnalyzers = append(allAnalyzers, cum.Analyzer())
- }
-
- // The -checks command line flag overrules all configuration
- // files, which means that for `-checks="foo"`, no check other
- // than foo can ever be reported to the user. Make use of this
- // fact to cull the list of analyses we need to run.
-
- // replace "inherit" with "all", as we don't want to base the
- // list of all checks on the default configuration, which
- // disables certain checks.
- checks := make([]string, len(l.Config.Checks))
- copy(checks, l.Config.Checks)
- for i, c := range checks {
- if c == "inherit" {
- checks[i] = "all"
- }
- }
-
- allowed := FilterChecks(allAnalyzers, checks)
- var allowedAnalyzers []*analysis.Analyzer
- for _, c := range l.Checkers {
- if allowed[c.Name] {
- allowedAnalyzers = append(allowedAnalyzers, c)
- }
- }
- hasCumulative := false
- for _, cum := range l.CumulativeCheckers {
- a := cum.Analyzer()
- if allowed[a.Name] {
- hasCumulative = true
- allowedAnalyzers = append(allowedAnalyzers, a)
- }
- }
-
- r, err := NewRunner(&l.Stats)
- if err != nil {
- return nil, err
- }
- r.goVersion = l.GoVersion
- r.repeatAnalyzers = l.RepeatAnalyzers
-
- pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative)
- if err != nil {
- return nil, err
- }
-
- tpkgToPkg := map[*types.Package]*Package{}
- for _, pkg := range pkgs {
- tpkgToPkg[pkg.Types] = pkg
-
- for _, e := range pkg.errs {
- switch e := e.(type) {
- case types.Error:
- p := Problem{
- Pos: e.Fset.PositionFor(e.Pos, false),
- Message: e.Msg,
- Severity: Error,
- Check: "compile",
- }
- pkg.problems = append(pkg.problems, p)
- case packages.Error:
- msg := e.Msg
- if len(msg) != 0 && msg[0] == '\n' {
- // TODO(dh): See https://github.com/golang/go/issues/32363
- msg = msg[1:]
- }
-
- var pos token.Position
- if e.Pos == "" {
- // Under certain conditions (malformed package
- // declarations, multiple packages in the same
- // directory), go list emits an error on stderr
- // instead of JSON. Those errors do not have
- // associated position information in
- // go/packages.Error, even though the output on
- // stderr may contain it.
- if p, n, err := parsePos(msg); err == nil {
- if abs, err := filepath.Abs(p.Filename); err == nil {
- p.Filename = abs
- }
- pos = p
- msg = msg[n+2:]
- }
- } else {
- var err error
- pos, _, err = parsePos(e.Pos)
- if err != nil {
- panic(fmt.Sprintf("internal error: %s", e))
- }
- }
- p := Problem{
- Pos: pos,
- Message: msg,
- Severity: Error,
- Check: "compile",
- }
- pkg.problems = append(pkg.problems, p)
- case scanner.ErrorList:
- for _, e := range e {
- p := Problem{
- Pos: e.Pos,
- Message: e.Msg,
- Severity: Error,
- Check: "compile",
- }
- pkg.problems = append(pkg.problems, p)
- }
- case error:
- p := Problem{
- Pos: token.Position{},
- Message: e.Error(),
- Severity: Error,
- Check: "compile",
- }
- pkg.problems = append(pkg.problems, p)
- }
- }
- }
-
- atomic.StoreUint32(&r.stats.State, StateCumulative)
- for _, cum := range l.CumulativeCheckers {
- for _, res := range cum.Result() {
- pkg := tpkgToPkg[res.Pkg()]
- if pkg == nil {
- panic(fmt.Sprintf("analyzer %s flagged object %s in package %s, a package that we aren't tracking", cum.Analyzer(), res, res.Pkg()))
- }
- allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
- if allowedChecks[cum.Analyzer().Name] {
- pos := DisplayPosition(pkg.Fset, res.Pos())
- // FIXME(dh): why are we ignoring generated files
- // here? Surely this is specific to 'unused', not all
- // cumulative checkers
- if _, ok := pkg.gen[pos.Filename]; ok {
- continue
- }
- p := cum.ProblemObject(pkg.Fset, res)
- pkg.problems = append(pkg.problems, p)
- }
- }
- }
-
- for _, pkg := range pkgs {
- if !pkg.fromSource {
- // Don't cache packages that we loaded from the cache
- continue
- }
- cpkg := cachedPackage{
- Problems: pkg.problems,
- Ignores: pkg.ignores,
- Config: pkg.cfg,
- }
- buf := &bytes.Buffer{}
- if err := gob.NewEncoder(buf).Encode(cpkg); err != nil {
- return nil, err
- }
- id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey)
- if err := r.cache.PutBytes(id, buf.Bytes()); err != nil {
- return nil, err
- }
- }
-
- var problems []Problem
- // Deduplicate line ignores. When U1000 processes a package and
- // its test variant, it will only emit a single problem for an
- // unused object, not two problems. We will, however, have two
- // line ignores, one per package. Without deduplication, one line
- // ignore will be marked as matched, while the other one won't,
- // subsequently reporting a "this linter directive didn't match
- // anything" error.
- ignores := map[token.Position]Ignore{}
- for _, pkg := range pkgs {
- for _, ig := range pkg.ignores {
- if lig, ok := ig.(*LineIgnore); ok {
- ig = ignores[lig.Pos]
- if ig == nil {
- ignores[lig.Pos] = lig
- ig = lig
- }
- }
- for i := range pkg.problems {
- p := &pkg.problems[i]
- if ig.Match(*p) {
- p.Severity = Ignored
- }
- }
- }
-
- if pkg.cfg == nil {
- // The package failed to load, otherwise we would have a
- // valid config. Pass through all errors.
- problems = append(problems, pkg.problems...)
- } else {
- for _, p := range pkg.problems {
- allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
- allowedChecks["compile"] = true
- if allowedChecks[p.Check] {
- problems = append(problems, p)
- }
- }
- }
-
- for _, ig := range pkg.ignores {
- ig, ok := ig.(*LineIgnore)
- if !ok {
- continue
- }
- ig = ignores[ig.Pos].(*LineIgnore)
- if ig.Matched {
- continue
- }
-
- couldveMatched := false
- allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks)
- for _, c := range ig.Checks {
- if !allowedChecks[c] {
- continue
- }
- couldveMatched = true
- break
- }
-
- if !couldveMatched {
- // The ignored checks were disabled for the containing package.
- // Don't flag the ignore for not having matched.
- continue
- }
- p := Problem{
- Pos: ig.Pos,
- Message: "this linter directive didn't match anything; should it be removed?",
- Check: "",
- }
- problems = append(problems, p)
- }
- }
-
- if len(problems) == 0 {
- return nil, nil
- }
-
- sort.Slice(problems, func(i, j int) bool {
- pi := problems[i].Pos
- pj := problems[j].Pos
-
- if pi.Filename != pj.Filename {
- return pi.Filename < pj.Filename
- }
- if pi.Line != pj.Line {
- return pi.Line < pj.Line
- }
- if pi.Column != pj.Column {
- return pi.Column < pj.Column
- }
-
- return problems[i].Message < problems[j].Message
- })
-
- var out []Problem
- out = append(out, problems[0])
- for i, p := range problems[1:] {
- // We may encounter duplicate problems because one file
- // can be part of many packages.
- if !problems[i].Equal(p) {
- out = append(out, p)
- }
- }
- return out, nil
-}
-
-func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bool {
- // OPT(dh): this entire computation could be cached per package
- allowedChecks := map[string]bool{}
-
- for _, check := range checks {
- b := true
- if len(check) > 1 && check[0] == '-' {
- b = false
- check = check[1:]
- }
- if check == "*" || check == "all" {
- // Match all
- for _, c := range allChecks {
- allowedChecks[c.Name] = b
- }
- } else if strings.HasSuffix(check, "*") {
- // Glob
- prefix := check[:len(check)-1]
- isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1
-
- for _, c := range allChecks {
- idx := strings.IndexFunc(c.Name, func(r rune) bool { return unicode.IsNumber(r) })
- if isCat {
- // Glob is S*, which should match S1000 but not SA1000
- cat := c.Name[:idx]
- if prefix == cat {
- allowedChecks[c.Name] = b
- }
- } else {
- // Glob is S1*
- if strings.HasPrefix(c.Name, prefix) {
- allowedChecks[c.Name] = b
- }
- }
- }
- } else {
- // Literal check name
- allowedChecks[check] = b
- }
- }
- return allowedChecks
-}
-
-func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position {
- if p == token.NoPos {
- return token.Position{}
- }
-
- // Only use the adjusted position if it points to another Go file.
- // This means we'll point to the original file for cgo files, but
- // we won't point to a YACC grammar file.
- pos := fset.PositionFor(p, false)
- adjPos := fset.PositionFor(p, true)
-
- if filepath.Ext(adjPos.Filename) == ".go" {
- return adjPos
- }
- return pos
-}
-
-var bufferPool = &sync.Pool{
- New: func() interface{} {
- buf := bytes.NewBuffer(nil)
- buf.Grow(64)
- return buf
- },
-}
-
-func FuncName(f *types.Func) string {
- buf := bufferPool.Get().(*bytes.Buffer)
- buf.Reset()
- if f.Type() != nil {
- sig := f.Type().(*types.Signature)
- if recv := sig.Recv(); recv != nil {
- buf.WriteByte('(')
- if _, ok := recv.Type().(*types.Interface); ok {
- // gcimporter creates abstract methods of
- // named interfaces using the interface type
- // (not the named type) as the receiver.
- // Don't print it in full.
- buf.WriteString("interface")
- } else {
- types.WriteType(buf, recv.Type(), nil)
- }
- buf.WriteByte(')')
- buf.WriteByte('.')
- } else if f.Pkg() != nil {
- writePackage(buf, f.Pkg())
- }
- }
- buf.WriteString(f.Name())
- s := buf.String()
- bufferPool.Put(buf)
- return s
-}
-
-func writePackage(buf *bytes.Buffer, pkg *types.Package) {
- if pkg == nil {
- return
- }
- s := pkg.Path()
- if s != "" {
- buf.WriteString(s)
- buf.WriteByte('.')
- }
-}
diff --git a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go b/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
deleted file mode 100644
index 4408aff25e4..00000000000
--- a/vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Package lintdsl provides helpers for implementing static analysis
-// checks. Dot-importing this package is encouraged.
-package lintdsl
-
-import (
- "bytes"
- "fmt"
- "go/ast"
- "go/format"
-
- "golang.org/x/tools/go/analysis"
- "honnef.co/go/tools/pattern"
-)
-
-func Inspect(node ast.Node, fn func(node ast.Node) bool) {
- if node == nil {
- return
- }
- ast.Inspect(node, fn)
-}
-
-func Match(pass *analysis.Pass, q pattern.Pattern, node ast.Node) (*pattern.Matcher, bool) {
- // Note that we ignore q.Relevant – callers of Match usually use
- // AST inspectors that already filter on nodes we're interested
- // in.
- m := &pattern.Matcher{TypesInfo: pass.TypesInfo}
- ok := m.Match(q.Root, node)
- return m, ok
-}
-
-func MatchAndEdit(pass *analysis.Pass, before, after pattern.Pattern, node ast.Node) (*pattern.Matcher, []analysis.TextEdit, bool) {
- m, ok := Match(pass, before, node)
- if !ok {
- return m, nil, false
- }
- r := pattern.NodeToAST(after.Root, m.State)
- buf := &bytes.Buffer{}
- format.Node(buf, pass.Fset, r)
- edit := []analysis.TextEdit{{
- Pos: node.Pos(),
- End: node.End(),
- NewText: buf.Bytes(),
- }}
- return m, edit, true
-}
-
-func Selector(x, sel string) *ast.SelectorExpr {
- return &ast.SelectorExpr{
- X: &ast.Ident{Name: x},
- Sel: &ast.Ident{Name: sel},
- }
-}
-
-// ExhaustiveTypeSwitch panics when called. It can be used to ensure
-// that type switches are exhaustive.
-func ExhaustiveTypeSwitch(v interface{}) {
- panic(fmt.Sprintf("internal error: unhandled case %T", v))
-}
diff --git a/vendor/honnef.co/go/tools/lint/lintutil/format/format.go b/vendor/honnef.co/go/tools/lint/lintutil/format/format.go
deleted file mode 100644
index b28f8885b87..00000000000
--- a/vendor/honnef.co/go/tools/lint/lintutil/format/format.go
+++ /dev/null
@@ -1,162 +0,0 @@
-// Package format provides formatters for linter problems.
-package format
-
-import (
- "encoding/json"
- "fmt"
- "go/token"
- "io"
- "os"
- "path/filepath"
- "text/tabwriter"
-
- "honnef.co/go/tools/lint"
-)
-
-func shortPath(path string) string {
- cwd, err := os.Getwd()
- if err != nil {
- return path
- }
- if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) {
- return rel
- }
- return path
-}
-
-func relativePositionString(pos token.Position) string {
- s := shortPath(pos.Filename)
- if pos.IsValid() {
- if s != "" {
- s += ":"
- }
- s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
- }
- if s == "" {
- s = "-"
- }
- return s
-}
-
-type Statter interface {
- Stats(total, errors, warnings, ignored int)
-}
-
-type Formatter interface {
- Format(p lint.Problem)
-}
-
-type Text struct {
- W io.Writer
-}
-
-func (o Text) Format(p lint.Problem) {
- fmt.Fprintf(o.W, "%s: %s\n", relativePositionString(p.Pos), p.String())
- for _, r := range p.Related {
- fmt.Fprintf(o.W, "\t%s: %s\n", relativePositionString(r.Pos), r.Message)
- }
-}
-
-type JSON struct {
- W io.Writer
-}
-
-func severity(s lint.Severity) string {
- switch s {
- case lint.Error:
- return "error"
- case lint.Warning:
- return "warning"
- case lint.Ignored:
- return "ignored"
- }
- return ""
-}
-
-func (o JSON) Format(p lint.Problem) {
- type location struct {
- File string `json:"file"`
- Line int `json:"line"`
- Column int `json:"column"`
- }
- type related struct {
- Location location `json:"location"`
- End location `json:"end"`
- Message string `json:"message"`
- }
- jp := struct {
- Code string `json:"code"`
- Severity string `json:"severity,omitempty"`
- Location location `json:"location"`
- End location `json:"end"`
- Message string `json:"message"`
- Related []related `json:"related,omitempty"`
- }{
- Code: p.Check,
- Severity: severity(p.Severity),
- Location: location{
- File: p.Pos.Filename,
- Line: p.Pos.Line,
- Column: p.Pos.Column,
- },
- End: location{
- File: p.End.Filename,
- Line: p.End.Line,
- Column: p.End.Column,
- },
- Message: p.Message,
- }
- for _, r := range p.Related {
- jp.Related = append(jp.Related, related{
- Location: location{
- File: r.Pos.Filename,
- Line: r.Pos.Line,
- Column: r.Pos.Column,
- },
- End: location{
- File: r.End.Filename,
- Line: r.End.Line,
- Column: r.End.Column,
- },
- Message: r.Message,
- })
- }
- _ = json.NewEncoder(o.W).Encode(jp)
-}
-
-type Stylish struct {
- W io.Writer
-
- prevFile string
- tw *tabwriter.Writer
-}
-
-func (o *Stylish) Format(p lint.Problem) {
- pos := p.Pos
- if pos.Filename == "" {
- pos.Filename = "-"
- }
-
- if pos.Filename != o.prevFile {
- if o.prevFile != "" {
- o.tw.Flush()
- fmt.Fprintln(o.W)
- }
- fmt.Fprintln(o.W, pos.Filename)
- o.prevFile = pos.Filename
- o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
- }
- fmt.Fprintf(o.tw, " (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message)
- for _, r := range p.Related {
- fmt.Fprintf(o.tw, " (%d, %d)\t\t %s\n", r.Pos.Line, r.Pos.Column, r.Message)
- }
-}
-
-func (o *Stylish) Stats(total, errors, warnings, ignored int) {
- if o.tw != nil {
- o.tw.Flush()
- fmt.Fprintln(o.W)
- }
- fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings, %d ignored)\n",
- total, errors, warnings, ignored)
-}
diff --git a/vendor/honnef.co/go/tools/lint/lintutil/stats.go b/vendor/honnef.co/go/tools/lint/lintutil/stats.go
deleted file mode 100644
index ba8caf0afdd..00000000000
--- a/vendor/honnef.co/go/tools/lint/lintutil/stats.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris
-
-package lintutil
-
-import "os"
-
-var infoSignals = []os.Signal{}
diff --git a/vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go b/vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go
deleted file mode 100644
index 3a62ede031c..00000000000
--- a/vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build darwin dragonfly freebsd netbsd openbsd
-
-package lintutil
-
-import (
- "os"
- "syscall"
-)
-
-var infoSignals = []os.Signal{syscall.SIGINFO}
diff --git a/vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go b/vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go
deleted file mode 100644
index 53f21c666b1..00000000000
--- a/vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build aix android linux solaris
-
-package lintutil
-
-import (
- "os"
- "syscall"
-)
-
-var infoSignals = []os.Signal{syscall.SIGUSR1}
diff --git a/vendor/honnef.co/go/tools/lint/lintutil/util.go b/vendor/honnef.co/go/tools/lint/lintutil/util.go
deleted file mode 100644
index 7c3dbdec193..00000000000
--- a/vendor/honnef.co/go/tools/lint/lintutil/util.go
+++ /dev/null
@@ -1,444 +0,0 @@
-// Copyright (c) 2013 The Go Authors. All rights reserved.
-//
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file or at
-// https://developers.google.com/open-source/licenses/bsd.
-
-// Package lintutil provides helpers for writing linter command lines.
-package lintutil // import "honnef.co/go/tools/lint/lintutil"
-
-import (
- "crypto/sha256"
- "errors"
- "flag"
- "fmt"
- "go/build"
- "go/token"
- "io"
- "log"
- "os"
- "os/signal"
- "regexp"
- "runtime"
- "runtime/pprof"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "honnef.co/go/tools/config"
- "honnef.co/go/tools/internal/cache"
- "honnef.co/go/tools/lint"
- "honnef.co/go/tools/lint/lintutil/format"
- "honnef.co/go/tools/version"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/buildutil"
- "golang.org/x/tools/go/packages"
-)
-
-func NewVersionFlag() flag.Getter {
- tags := build.Default.ReleaseTags
- v := tags[len(tags)-1][2:]
- version := new(VersionFlag)
- if err := version.Set(v); err != nil {
- panic(fmt.Sprintf("internal error: %s", err))
- }
- return version
-}
-
-type VersionFlag int
-
-func (v *VersionFlag) String() string {
- return fmt.Sprintf("1.%d", *v)
-
-}
-
-func (v *VersionFlag) Set(s string) error {
- if len(s) < 3 {
- return errors.New("invalid Go version")
- }
- if s[0] != '1' {
- return errors.New("invalid Go version")
- }
- if s[1] != '.' {
- return errors.New("invalid Go version")
- }
- i, err := strconv.Atoi(s[2:])
- *v = VersionFlag(i)
- return err
-}
-
-func (v *VersionFlag) Get() interface{} {
- return int(*v)
-}
-
-func usage(name string, flags *flag.FlagSet) func() {
- return func() {
- fmt.Fprintf(os.Stderr, "Usage of %s:\n", name)
- fmt.Fprintf(os.Stderr, "\t%s [flags] # runs on package in current directory\n", name)
- fmt.Fprintf(os.Stderr, "\t%s [flags] packages\n", name)
- fmt.Fprintf(os.Stderr, "\t%s [flags] directory\n", name)
- fmt.Fprintf(os.Stderr, "\t%s [flags] files... # must be a single package\n", name)
- fmt.Fprintf(os.Stderr, "Flags:\n")
- flags.PrintDefaults()
- }
-}
-
-type list []string
-
-func (list *list) String() string {
- return `"` + strings.Join(*list, ",") + `"`
-}
-
-func (list *list) Set(s string) error {
- if s == "" {
- *list = nil
- return nil
- }
-
- *list = strings.Split(s, ",")
- return nil
-}
-
-func FlagSet(name string) *flag.FlagSet {
- flags := flag.NewFlagSet("", flag.ExitOnError)
- flags.Usage = usage(name, flags)
- flags.String("tags", "", "List of `build tags`")
- flags.Bool("tests", true, "Include tests")
- flags.Bool("version", false, "Print version and exit")
- flags.Bool("show-ignored", false, "Don't filter ignored problems")
- flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
- flags.String("explain", "", "Print description of `check`")
-
- flags.String("debug.cpuprofile", "", "Write CPU profile to `file`")
- flags.String("debug.memprofile", "", "Write memory profile to `file`")
- flags.Bool("debug.version", false, "Print detailed version information about this program")
- flags.Bool("debug.no-compile-errors", false, "Don't print compile errors")
- flags.String("debug.measure-analyzers", "", "Write analysis measurements to `file`. `file` will be opened for appending if it already exists.")
- flags.Uint("debug.repeat-analyzers", 0, "Run analyzers `num` times")
-
- checks := list{"inherit"}
- fail := list{"all"}
- flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.")
- flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.")
-
- tags := build.Default.ReleaseTags
- v := tags[len(tags)-1][2:]
- version := new(VersionFlag)
- if err := version.Set(v); err != nil {
- panic(fmt.Sprintf("internal error: %s", err))
- }
-
- flags.Var(version, "go", "Target Go `version` in the format '1.x'")
- return flags
-}
-
-func findCheck(cs []*analysis.Analyzer, check string) (*analysis.Analyzer, bool) {
- for _, c := range cs {
- if c.Name == check {
- return c, true
- }
- }
- return nil, false
-}
-
-func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
- tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
- tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
- goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
- formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string)
- printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool)
- showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool)
- explain := fs.Lookup("explain").Value.(flag.Getter).Get().(string)
-
- cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string)
- memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
- debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool)
- debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool)
- debugRepeat := fs.Lookup("debug.repeat-analyzers").Value.(flag.Getter).Get().(uint)
-
- var measureAnalyzers func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration)
- if path := fs.Lookup("debug.measure-analyzers").Value.(flag.Getter).Get().(string); path != "" {
- f, err := os.OpenFile(path, os.O_CREATE|os.O_APPEND|os.O_WRONLY, 0600)
- if err != nil {
- log.Fatal(err)
- }
-
- mu := &sync.Mutex{}
- measureAnalyzers = func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration) {
- mu.Lock()
- defer mu.Unlock()
- if _, err := fmt.Fprintf(f, "%s\t%s\t%d\n", analysis.Name, pkg.ID, d.Nanoseconds()); err != nil {
- log.Println("error writing analysis measurements:", err)
- }
- }
- }
-
- cfg := config.Config{}
- cfg.Checks = *fs.Lookup("checks").Value.(*list)
-
- exit := func(code int) {
- if cpuProfile != "" {
- pprof.StopCPUProfile()
- }
- if memProfile != "" {
- f, err := os.Create(memProfile)
- if err != nil {
- panic(err)
- }
- runtime.GC()
- pprof.WriteHeapProfile(f)
- }
- os.Exit(code)
- }
- if cpuProfile != "" {
- f, err := os.Create(cpuProfile)
- if err != nil {
- log.Fatal(err)
- }
- pprof.StartCPUProfile(f)
- }
-
- if debugVersion {
- version.Verbose()
- exit(0)
- }
-
- if printVersion {
- version.Print()
- exit(0)
- }
-
- // Validate that the tags argument is well-formed. go/packages
- // doesn't detect malformed build flags and returns unhelpful
- // errors.
- tf := buildutil.TagsFlag{}
- if err := tf.Set(tags); err != nil {
- fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", tags, err))
- exit(1)
- }
-
- if explain != "" {
- var haystack []*analysis.Analyzer
- haystack = append(haystack, cs...)
- for _, cum := range cums {
- haystack = append(haystack, cum.Analyzer())
- }
- check, ok := findCheck(haystack, explain)
- if !ok {
- fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
- exit(1)
- }
- if check.Doc == "" {
- fmt.Fprintln(os.Stderr, explain, "has no documentation")
- exit(1)
- }
- fmt.Println(check.Doc)
- exit(0)
- }
-
- ps, err := Lint(cs, cums, fs.Args(), &Options{
- Tags: tags,
- LintTests: tests,
- GoVersion: goVersion,
- Config: cfg,
- PrintAnalyzerMeasurement: measureAnalyzers,
- RepeatAnalyzers: debugRepeat,
- })
- if err != nil {
- fmt.Fprintln(os.Stderr, err)
- exit(1)
- }
-
- var f format.Formatter
- switch formatter {
- case "text":
- f = format.Text{W: os.Stdout}
- case "stylish":
- f = &format.Stylish{W: os.Stdout}
- case "json":
- f = format.JSON{W: os.Stdout}
- default:
- fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter)
- exit(2)
- }
-
- var (
- total int
- errors int
- warnings int
- ignored int
- )
-
- fail := *fs.Lookup("fail").Value.(*list)
- analyzers := make([]*analysis.Analyzer, len(cs), len(cs)+len(cums))
- copy(analyzers, cs)
- for _, cum := range cums {
- analyzers = append(analyzers, cum.Analyzer())
- }
- shouldExit := lint.FilterChecks(analyzers, fail)
- shouldExit["compile"] = true
-
- total = len(ps)
- for _, p := range ps {
- if p.Check == "compile" && debugNoCompile {
- continue
- }
- if p.Severity == lint.Ignored && !showIgnored {
- ignored++
- continue
- }
- if shouldExit[p.Check] {
- errors++
- } else {
- p.Severity = lint.Warning
- warnings++
- }
- f.Format(p)
- }
- if f, ok := f.(format.Statter); ok {
- f.Stats(total, errors, warnings, ignored)
- }
- if errors > 0 {
- exit(1)
- }
- exit(0)
-}
-
-type Options struct {
- Config config.Config
-
- Tags string
- LintTests bool
- GoVersion int
- PrintAnalyzerMeasurement func(analysis *analysis.Analyzer, pkg *lint.Package, d time.Duration)
- RepeatAnalyzers uint
-}
-
-func computeSalt() ([]byte, error) {
- if version.Version != "devel" {
- return []byte(version.Version), nil
- }
- p, err := os.Executable()
- if err != nil {
- return nil, err
- }
- f, err := os.Open(p)
- if err != nil {
- return nil, err
- }
- defer f.Close()
- h := sha256.New()
- if _, err := io.Copy(h, f); err != nil {
- return nil, err
- }
- return h.Sum(nil), nil
-}
-
-func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string, opt *Options) ([]lint.Problem, error) {
- salt, err := computeSalt()
- if err != nil {
- return nil, fmt.Errorf("could not compute salt for cache: %s", err)
- }
- cache.SetSalt(salt)
-
- if opt == nil {
- opt = &Options{}
- }
-
- l := &lint.Linter{
- Checkers: cs,
- CumulativeCheckers: cums,
- GoVersion: opt.GoVersion,
- Config: opt.Config,
- RepeatAnalyzers: opt.RepeatAnalyzers,
- }
- l.Stats.PrintAnalyzerMeasurement = opt.PrintAnalyzerMeasurement
- cfg := &packages.Config{}
- if opt.LintTests {
- cfg.Tests = true
- }
- if opt.Tags != "" {
- cfg.BuildFlags = append(cfg.BuildFlags, "-tags", opt.Tags)
- }
-
- printStats := func() {
- // Individual stats are read atomically, but overall there
- // is no synchronisation. For printing rough progress
- // information, this doesn't matter.
- switch atomic.LoadUint32(&l.Stats.State) {
- case lint.StateInitializing:
- fmt.Fprintln(os.Stderr, "Status: initializing")
- case lint.StateGraph:
- fmt.Fprintln(os.Stderr, "Status: loading package graph")
- case lint.StateProcessing:
- fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d; Problems: %d\n",
- atomic.LoadUint32(&l.Stats.ProcessedInitialPackages),
- atomic.LoadUint32(&l.Stats.InitialPackages),
- atomic.LoadUint32(&l.Stats.ProcessedPackages),
- atomic.LoadUint32(&l.Stats.TotalPackages),
- atomic.LoadUint32(&l.Stats.ActiveWorkers),
- atomic.LoadUint32(&l.Stats.TotalWorkers),
- atomic.LoadUint32(&l.Stats.Problems),
- )
- case lint.StateCumulative:
- fmt.Fprintln(os.Stderr, "Status: processing cumulative checkers")
- }
- }
- if len(infoSignals) > 0 {
- ch := make(chan os.Signal, 1)
- signal.Notify(ch, infoSignals...)
- defer signal.Stop(ch)
- go func() {
- for range ch {
- printStats()
- }
- }()
- }
-
- ps, err := l.Lint(cfg, paths)
- return ps, err
-}
-
-var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`)
-
-func parsePos(pos string) token.Position {
- if pos == "-" || pos == "" {
- return token.Position{}
- }
- parts := posRe.FindStringSubmatch(pos)
- if parts == nil {
- panic(fmt.Sprintf("internal error: malformed position %q", pos))
- }
- file := parts[1]
- line, _ := strconv.Atoi(parts[2])
- col, _ := strconv.Atoi(parts[3])
- return token.Position{
- Filename: file,
- Line: line,
- Column: col,
- }
-}
-
-func InitializeAnalyzers(docs map[string]*lint.Documentation, analyzers map[string]*analysis.Analyzer) map[string]*analysis.Analyzer {
- out := make(map[string]*analysis.Analyzer, len(analyzers))
- for k, v := range analyzers {
- vc := *v
- out[k] = &vc
-
- vc.Name = k
- doc, ok := docs[k]
- if !ok {
- panic(fmt.Sprintf("missing documentation for check %s", k))
- }
- vc.Doc = doc.String()
- if vc.Flags.Usage == nil {
- fs := flag.NewFlagSet("", flag.PanicOnError)
- fs.Var(NewVersionFlag(), "go", "Target Go version")
- vc.Flags = *fs
- }
- }
- return out
-}
diff --git a/vendor/honnef.co/go/tools/lint/runner.go b/vendor/honnef.co/go/tools/lint/runner.go
deleted file mode 100644
index 74106ced826..00000000000
--- a/vendor/honnef.co/go/tools/lint/runner.go
+++ /dev/null
@@ -1,1114 +0,0 @@
-package lint
-
-/*
-Package loading
-
-Conceptually, package loading in the runner can be imagined as a
-graph-shaped work list. We iteratively pop off leaf nodes (packages
-that have no unloaded dependencies) and load data from export data,
-our cache, or source.
-
-Specifically, non-initial packages are loaded from export data and the
-fact cache if possible, otherwise from source. Initial packages are
-loaded from export data, the fact cache and the (problems, ignores,
-config) cache if possible, otherwise from source.
-
-The appeal of this approach is that it is both simple to implement and
-easily parallelizable. Each leaf node can be processed independently,
-and new leaf nodes appear as their dependencies are being processed.
-
-The downside of this approach, however, is that we're doing more work
-than necessary. Imagine an initial package A, which has the following
-dependency chain: A->B->C->D – in the current implementation, we will
-load all 4 packages. However, if package A can be loaded fully from
-cached information, then none of its dependencies are necessary, and
-we could avoid loading them.
-
-
-Parallelism
-
-Runner implements parallel processing of packages by spawning one
-goroutine per package in the dependency graph, without any semaphores.
-Each goroutine initially waits on the completion of all of its
-dependencies, thus establishing correct order of processing. Once all
-dependencies finish processing, the goroutine will load the package
-from export data or source – this loading is guarded by a semaphore,
-sized according to the number of CPU cores. This way, we only have as
-many packages occupying memory and CPU resources as there are actual
-cores to process them.
-
-This combination of unbounded goroutines but bounded package loading
-means that if we have many parallel, independent subgraphs, they will
-all execute in parallel, while not wasting resources for long linear
-chains or trying to process more subgraphs in parallel than the system
-can handle.
-
-
-Caching
-
-We make use of several caches. These caches are Go's export data, our
-facts cache, and our (problems, ignores, config) cache.
-
-Initial packages will either be loaded from a combination of all three
-caches, or from source. Non-initial packages will either be loaded
-from a combination of export data and facts cache, or from source.
-
-The facts cache is separate from the (problems, ignores, config) cache
-because when we process non-initial packages, we generate facts, but
-we discard problems and ignores.
-
-The facts cache is keyed by (package, analyzer), whereas the
-(problems, ignores, config) cache is keyed by (package, list of
-analyzes). The difference between the two exists because there are
-only a handful of analyses that produce facts, but hundreds of
-analyses that don't. Creating one cache entry per fact-generating
-analysis is feasible, creating one cache entry per normal analysis has
-significant performance and storage overheads.
-
-The downside of keying by the list of analyzes is, naturally, that a
-change in list of analyzes changes the cache key. `staticcheck -checks
-A` and `staticcheck -checks A,B` will therefore need their own cache
-entries and not reuse each other's work. This problem does not affect
-the facts cache.
-
-*/
-
-import (
- "bytes"
- "encoding/gob"
- "encoding/hex"
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "reflect"
- "regexp"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "sync"
- "sync/atomic"
- "time"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/packages"
- "golang.org/x/tools/go/types/objectpath"
- "honnef.co/go/tools/config"
- "honnef.co/go/tools/facts"
- "honnef.co/go/tools/internal/cache"
- "honnef.co/go/tools/loader"
-)
-
-func init() {
- gob.Register(&FileIgnore{})
- gob.Register(&LineIgnore{})
-}
-
-// If enabled, abuse of the go/analysis API will lead to panics
-const sanityCheck = true
-
-// OPT(dh): for a dependency tree A->B->C->D, if we have cached data
-// for B, there should be no need to load C and D individually. Go's
-// export data for B contains all the data we need on types, and our
-// fact cache could store the union of B, C and D in B.
-//
-// This may change unused's behavior, however, as it may observe fewer
-// interfaces from transitive dependencies.
-
-// OPT(dh): every single package will have the same value for
-// canClearTypes. We could move the Package.decUse method to runner to
-// eliminate this field. This is probably not worth it, though. There
-// are only thousands of packages, so the field only takes up
-// kilobytes of memory.
-
-// OPT(dh): do we really need the Package.gen field? it's based
-// trivially on pkg.results and merely caches the result of a type
-// assertion. How often do we actually use the field?
-
-type Package struct {
- // dependents is initially set to 1 plus the number of packages
- // that directly import this package. It is atomically decreased
- // by 1 every time a dependent has been processed or when the
- // package itself has been processed. Once the value reaches zero,
- // the package is no longer needed.
- dependents uint64
-
- *packages.Package
- Imports []*Package
- initial bool
- // fromSource is set to true for packages that have been loaded
- // from source. This is the case for initial packages, packages
- // with missing export data, and packages with no cached facts.
- fromSource bool
- // hash stores the package hash, as computed by packageHash
- hash string
- actionID cache.ActionID
- done chan struct{}
-
- resultsMu sync.Mutex
- // results maps analyzer IDs to analyzer results. it is
- // implemented as a deduplicating concurrent cache.
- results []*result
-
- cfg *config.Config
- // gen maps file names to the code generator that created them
- gen map[string]facts.Generator
- problems []Problem
- ignores []Ignore
- errs []error
-
- // these slices are indexed by analysis
- facts []map[types.Object][]analysis.Fact
- pkgFacts [][]analysis.Fact
-
- // canClearTypes is set to true if we can discard type
- // information after the package and its dependents have been
- // processed. This is the case when no cumulative checkers are
- // being run.
- canClearTypes bool
-}
-
-type cachedPackage struct {
- Problems []Problem
- Ignores []Ignore
- Config *config.Config
-}
-
-func (pkg *Package) decUse() {
- ret := atomic.AddUint64(&pkg.dependents, ^uint64(0))
- if ret == 0 {
- // nobody depends on this package anymore
- if pkg.canClearTypes {
- pkg.Types = nil
- }
- pkg.facts = nil
- pkg.pkgFacts = nil
-
- for _, imp := range pkg.Imports {
- imp.decUse()
- }
- }
-}
-
-type result struct {
- v interface{}
- err error
- ready chan struct{}
-}
-
-type Runner struct {
- cache *cache.Cache
- goVersion int
- stats *Stats
- repeatAnalyzers uint
-
- analyzerIDs analyzerIDs
- problemsCacheKey string
-
- // limits parallelism of loading packages
- loadSem chan struct{}
-}
-
-type analyzerIDs struct {
- m map[*analysis.Analyzer]int
-}
-
-func (ids analyzerIDs) get(a *analysis.Analyzer) int {
- id, ok := ids.m[a]
- if !ok {
- panic(fmt.Sprintf("no analyzer ID for %s", a.Name))
- }
- return id
-}
-
-type Fact struct {
- Path string
- Fact analysis.Fact
-}
-
-type analysisAction struct {
- analyzer *analysis.Analyzer
- analyzerID int
- pkg *Package
- newPackageFacts []analysis.Fact
- problems []Problem
-
- pkgFacts map[*types.Package][]analysis.Fact
-}
-
-func (ac *analysisAction) String() string {
- return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg)
-}
-
-func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact {
- out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID]))
- for obj, facts := range ac.pkg.facts[ac.analyzerID] {
- for _, fact := range facts {
- out = append(out, analysis.ObjectFact{
- Object: obj,
- Fact: fact,
- })
- }
- }
- return out
-}
-
-func (ac *analysisAction) allPackageFacts() []analysis.PackageFact {
- out := make([]analysis.PackageFact, 0, len(ac.pkgFacts))
- for pkg, facts := range ac.pkgFacts {
- for _, fact := range facts {
- out = append(out, analysis.PackageFact{
- Package: pkg,
- Fact: fact,
- })
- }
- }
- return out
-}
-
-func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool {
- if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
- panic("analysis doesn't export any facts")
- }
- for _, f := range ac.pkg.facts[ac.analyzerID][obj] {
- if reflect.TypeOf(f) == reflect.TypeOf(fact) {
- reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
- return true
- }
- }
- return false
-}
-
-func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool {
- if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
- panic("analysis doesn't export any facts")
- }
- for _, f := range ac.pkgFacts[pkg] {
- if reflect.TypeOf(f) == reflect.TypeOf(fact) {
- reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem())
- return true
- }
- }
- return false
-}
-
-func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) {
- if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
- panic("analysis doesn't export any facts")
- }
- ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact)
-}
-
-func (ac *analysisAction) exportPackageFact(fact analysis.Fact) {
- if sanityCheck && len(ac.analyzer.FactTypes) == 0 {
- panic("analysis doesn't export any facts")
- }
- ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact)
- ac.newPackageFacts = append(ac.newPackageFacts, fact)
-}
-
-func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) {
- p := Problem{
- Pos: DisplayPosition(pass.Fset, d.Pos),
- End: DisplayPosition(pass.Fset, d.End),
- Message: d.Message,
- Check: pass.Analyzer.Name,
- }
- for _, r := range d.Related {
- p.Related = append(p.Related, Related{
- Pos: DisplayPosition(pass.Fset, r.Pos),
- End: DisplayPosition(pass.Fset, r.End),
- Message: r.Message,
- })
- }
- ac.problems = append(ac.problems, p)
-}
-
-func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) {
- ac.pkg.resultsMu.Lock()
- res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)]
- if res != nil {
- ac.pkg.resultsMu.Unlock()
- <-res.ready
- return res.v, res.err
- } else {
- res = &result{
- ready: make(chan struct{}),
- }
- ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res
- ac.pkg.resultsMu.Unlock()
-
- defer func() {
- res.v = ret
- res.err = err
- close(res.ready)
- }()
-
- pass := new(analysis.Pass)
- *pass = analysis.Pass{
- Analyzer: ac.analyzer,
- Fset: ac.pkg.Fset,
- Files: ac.pkg.Syntax,
- // type information may be nil or may be populated. if it is
- // nil, it will get populated later.
- Pkg: ac.pkg.Types,
- TypesInfo: ac.pkg.TypesInfo,
- TypesSizes: ac.pkg.TypesSizes,
- ResultOf: map[*analysis.Analyzer]interface{}{},
- ImportObjectFact: ac.importObjectFact,
- ImportPackageFact: ac.importPackageFact,
- ExportObjectFact: ac.exportObjectFact,
- ExportPackageFact: ac.exportPackageFact,
- Report: func(d analysis.Diagnostic) {
- ac.report(pass, d)
- },
- AllObjectFacts: ac.allObjectFacts,
- AllPackageFacts: ac.allPackageFacts,
- }
-
- if !ac.pkg.initial {
- // Don't report problems in dependencies
- pass.Report = func(analysis.Diagnostic) {}
- }
- return r.runAnalysisUser(pass, ac)
- }
-}
-
-func (r *Runner) loadCachedPackage(pkg *Package, analyzers []*analysis.Analyzer) (cachedPackage, bool) {
- // OPT(dh): we can cache this computation, it'll be the same for all packages
- id := cache.Subkey(pkg.actionID, "data "+r.problemsCacheKey)
-
- b, _, err := r.cache.GetBytes(id)
- if err != nil {
- return cachedPackage{}, false
- }
- var cpkg cachedPackage
- if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&cpkg); err != nil {
- return cachedPackage{}, false
- }
- return cpkg, true
-}
-
-func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) {
- if len(a.FactTypes) == 0 {
- return nil, true
- }
-
- var facts []Fact
- // Look in the cache for facts
- aID := passActionID(pkg, a)
- aID = cache.Subkey(aID, "facts")
- b, _, err := r.cache.GetBytes(aID)
- if err != nil {
- // No cached facts, analyse this package like a user-provided one, but ignore diagnostics
- return nil, false
- }
-
- if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil {
- // Cached facts are broken, analyse this package like a user-provided one, but ignore diagnostics
- return nil, false
- }
- return facts, true
-}
-
-type dependencyError struct {
- dep string
- err error
-}
-
-func (err dependencyError) nested() dependencyError {
- if o, ok := err.err.(dependencyError); ok {
- return o.nested()
- }
- return err
-}
-
-func (err dependencyError) Error() string {
- if o, ok := err.err.(dependencyError); ok {
- return o.Error()
- }
- return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err)
-}
-
-func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction {
- aid := r.analyzerIDs.get(a)
- ac := &analysisAction{
- analyzer: a,
- analyzerID: aid,
- pkg: pkg,
- }
-
- if len(a.FactTypes) == 0 {
- return ac
- }
-
- // Merge all package facts of dependencies
- ac.pkgFacts = map[*types.Package][]analysis.Fact{}
- seen := map[*Package]struct{}{}
- var dfs func(*Package)
- dfs = func(pkg *Package) {
- if _, ok := seen[pkg]; ok {
- return
- }
- seen[pkg] = struct{}{}
- s := pkg.pkgFacts[aid]
- ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)]
- for _, imp := range pkg.Imports {
- dfs(imp)
- }
- }
- dfs(pkg)
-
- return ac
-}
-
-// analyzes that we always want to run, even if they're not being run
-// explicitly or as dependencies. these are necessary for the inner
-// workings of the runner.
-var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer}
-
-func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) {
- if !ac.pkg.fromSource {
- panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg))
- }
-
- // User-provided package, analyse it
- // First analyze it with dependencies
- for _, req := range ac.analyzer.Requires {
- acReq := r.makeAnalysisAction(req, ac.pkg)
- ret, err := r.runAnalysis(acReq)
- if err != nil {
- // We couldn't run a dependency, no point in going on
- return nil, dependencyError{req.Name, err}
- }
-
- pass.ResultOf[req] = ret
- }
-
- // Then with this analyzer
- var ret interface{}
- for i := uint(0); i < r.repeatAnalyzers+1; i++ {
- var err error
- t := time.Now()
- ret, err = ac.analyzer.Run(pass)
- r.stats.MeasureAnalyzer(ac.analyzer, ac.pkg, time.Since(t))
- if err != nil {
- return nil, err
- }
- }
-
- if len(ac.analyzer.FactTypes) > 0 {
- // Merge new facts into the package and persist them.
- var facts []Fact
- for _, fact := range ac.newPackageFacts {
- id := r.analyzerIDs.get(ac.analyzer)
- ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact)
- facts = append(facts, Fact{"", fact})
- }
- for obj, afacts := range ac.pkg.facts[ac.analyzerID] {
- if obj.Pkg() != ac.pkg.Package.Types {
- continue
- }
- path, err := objectpath.For(obj)
- if err != nil {
- continue
- }
- for _, fact := range afacts {
- facts = append(facts, Fact{string(path), fact})
- }
- }
-
- if err := r.cacheData(facts, ac.pkg, ac.analyzer, "facts"); err != nil {
- return nil, err
- }
- }
-
- return ret, nil
-}
-
-func (r *Runner) cacheData(v interface{}, pkg *Package, a *analysis.Analyzer, subkey string) error {
- buf := &bytes.Buffer{}
- if err := gob.NewEncoder(buf).Encode(v); err != nil {
- return err
- }
- aID := passActionID(pkg, a)
- aID = cache.Subkey(aID, subkey)
- if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil {
- return err
- }
- return nil
-}
-
-func NewRunner(stats *Stats) (*Runner, error) {
- cache, err := cache.Default()
- if err != nil {
- return nil, err
- }
-
- return &Runner{
- cache: cache,
- stats: stats,
- }, nil
-}
-
-// Run loads packages corresponding to patterns and analyses them with
-// analyzers. It returns the loaded packages, which contain reported
-// diagnostics as well as extracted ignore directives.
-//
-// Note that diagnostics have not been filtered at this point yet, to
-// accommodate cumulative analyzes that require additional steps to
-// produce diagnostics.
-func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) {
- checkerNames := make([]string, len(analyzers))
- for i, a := range analyzers {
- checkerNames[i] = a.Name
- }
- sort.Strings(checkerNames)
- r.problemsCacheKey = strings.Join(checkerNames, " ")
-
- var allAnalyzers []*analysis.Analyzer
- r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}}
- id := 0
- seen := map[*analysis.Analyzer]struct{}{}
- var dfs func(a *analysis.Analyzer)
- dfs = func(a *analysis.Analyzer) {
- if _, ok := seen[a]; ok {
- return
- }
- seen[a] = struct{}{}
- allAnalyzers = append(allAnalyzers, a)
- r.analyzerIDs.m[a] = id
- id++
- for _, f := range a.FactTypes {
- gob.Register(f)
- }
- for _, req := range a.Requires {
- dfs(req)
- }
- }
- for _, a := range analyzers {
- if v := a.Flags.Lookup("go"); v != nil {
- v.Value.Set(fmt.Sprintf("1.%d", r.goVersion))
- }
- dfs(a)
- }
- for _, a := range injectedAnalyses {
- dfs(a)
- }
- // Run all analyzers on all packages (subject to further
- // restrictions enforced later). This guarantees that if analyzer
- // A1 depends on A2, and A2 has facts, that A2 will run on the
- // dependencies of user-provided packages, even though A1 won't.
- analyzers = allAnalyzers
-
- var dcfg packages.Config
- if cfg != nil {
- dcfg = *cfg
- }
-
- atomic.StoreUint32(&r.stats.State, StateGraph)
- initialPkgs, err := loader.Graph(dcfg, patterns...)
- if err != nil {
- return nil, err
- }
- defer r.cache.Trim()
-
- var allPkgs []*Package
- m := map[*packages.Package]*Package{}
- packages.Visit(initialPkgs, nil, func(l *packages.Package) {
- m[l] = &Package{
- Package: l,
- results: make([]*result, len(r.analyzerIDs.m)),
- facts: make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)),
- pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)),
- done: make(chan struct{}),
- // every package needs itself
- dependents: 1,
- canClearTypes: !hasCumulative,
- }
- allPkgs = append(allPkgs, m[l])
- for i := range m[l].facts {
- m[l].facts[i] = map[types.Object][]analysis.Fact{}
- }
- for _, err := range l.Errors {
- m[l].errs = append(m[l].errs, err)
- }
- for _, v := range l.Imports {
- m[v].dependents++
- m[l].Imports = append(m[l].Imports, m[v])
- }
-
- m[l].hash, err = r.packageHash(m[l])
- m[l].actionID = packageActionID(m[l])
- if err != nil {
- m[l].errs = append(m[l].errs, err)
- }
- })
-
- pkgs := make([]*Package, len(initialPkgs))
- for i, l := range initialPkgs {
- pkgs[i] = m[l]
- pkgs[i].initial = true
- }
-
- atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs)))
- atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs)))
- atomic.StoreUint32(&r.stats.State, StateProcessing)
-
- var wg sync.WaitGroup
- wg.Add(len(allPkgs))
- r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1))
- atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem)))
- for _, pkg := range allPkgs {
- pkg := pkg
- go func() {
- r.processPkg(pkg, analyzers)
-
- if pkg.initial {
- atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1)
- }
- atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems)))
- wg.Done()
- }()
- }
- wg.Wait()
-
- return pkgs, nil
-}
-
-var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)
-
-func parsePos(pos string) (token.Position, int, error) {
- if pos == "-" || pos == "" {
- return token.Position{}, 0, nil
- }
- parts := posRe.FindStringSubmatch(pos)
- if parts == nil {
- return token.Position{}, 0, fmt.Errorf("malformed position %q", pos)
- }
- file := parts[1]
- line, _ := strconv.Atoi(parts[2])
- col, _ := strconv.Atoi(parts[3])
- return token.Position{
- Filename: file,
- Line: line,
- Column: col,
- }, len(parts[0]), nil
-}
-
-// loadPkg loads a Go package. It may be loaded from a combination of
-// caches, or from source.
-func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
- if pkg.Types != nil {
- panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package))
- }
-
- if pkg.initial {
- // Try to load cached package
- cpkg, ok := r.loadCachedPackage(pkg, analyzers)
- if ok {
- pkg.problems = cpkg.Problems
- pkg.ignores = cpkg.Ignores
- pkg.cfg = cpkg.Config
- } else {
- pkg.fromSource = true
- return loader.LoadFromSource(pkg.Package)
- }
- }
-
- // At this point we're either working with a non-initial package,
- // or we managed to load cached problems for the package. We still
- // need export data and facts.
-
- // OPT(dh): we don't need type information for this package if no
- // other package depends on it. this may be the case for initial
- // packages.
-
- // Load package from export data
- if err := loader.LoadFromExport(pkg.Package); err != nil {
- // We asked Go to give us up to date export data, yet
- // we can't load it. There must be something wrong.
- //
- // Attempt loading from source. This should fail (because
- // otherwise there would be export data); we just want to
- // get the compile errors. If loading from source succeeds
- // we discard the result, anyway. Otherwise we'll fail
- // when trying to reload from export data later.
- //
- // FIXME(dh): we no longer reload from export data, so
- // theoretically we should be able to continue
- pkg.fromSource = true
- if err := loader.LoadFromSource(pkg.Package); err != nil {
- return err
- }
- // Make sure this package can't be imported successfully
- pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{
- Pos: "-",
- Msg: fmt.Sprintf("could not load export data: %s", err),
- Kind: packages.ParseError,
- })
- return fmt.Errorf("could not load export data: %s", err)
- }
-
- failed := false
- seen := make([]bool, len(r.analyzerIDs.m))
- var dfs func(*analysis.Analyzer)
- dfs = func(a *analysis.Analyzer) {
- if seen[r.analyzerIDs.get(a)] {
- return
- }
- seen[r.analyzerIDs.get(a)] = true
-
- if len(a.FactTypes) > 0 {
- facts, ok := r.loadCachedFacts(a, pkg)
- if !ok {
- failed = true
- return
- }
-
- for _, f := range facts {
- if f.Path == "" {
- // This is a package fact
- pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact)
- continue
- }
- obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path))
- if err != nil {
- // Be lenient about these errors. For example, when
- // analysing io/ioutil from source, we may get a fact
- // for methods on the devNull type, and objectpath
- // will happily create a path for them. However, when
- // we later load io/ioutil from export data, the path
- // no longer resolves.
- //
- // If an exported type embeds the unexported type,
- // then (part of) the unexported type will become part
- // of the type information and our path will resolve
- // again.
- continue
- }
- pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact)
- }
- }
-
- for _, req := range a.Requires {
- dfs(req)
- }
- }
- for _, a := range analyzers {
- dfs(a)
- }
-
- if !failed {
- return nil
- }
-
- // We failed to load some cached facts
- pkg.fromSource = true
- // XXX we added facts to the maps, we need to get rid of those
- return loader.LoadFromSource(pkg.Package)
-}
-
-type analysisError struct {
- analyzer *analysis.Analyzer
- pkg *Package
- err error
-}
-
-func (err analysisError) Error() string {
- return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err)
-}
-
-// processPkg processes a package. This involves loading the package,
-// either from export data or from source. For packages loaded from
-// source, the provides analyzers will be run on the package.
-func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
- defer func() {
- // Clear information we no longer need. Make sure to do this
- // when returning from processPkg so that we clear
- // dependencies, not just initial packages.
- pkg.TypesInfo = nil
- pkg.Syntax = nil
- pkg.results = nil
-
- atomic.AddUint32(&r.stats.ProcessedPackages, 1)
- pkg.decUse()
- close(pkg.done)
- }()
-
- // Ensure all packages have the generated map and config. This is
- // required by internals of the runner. Analyses that themselves
- // make use of either have an explicit dependency so that other
- // runners work correctly, too.
- analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...)
-
- if len(pkg.errs) != 0 {
- return
- }
-
- for _, imp := range pkg.Imports {
- <-imp.done
- if len(imp.errs) > 0 {
- if imp.initial {
- // Don't print the error of the dependency since it's
- // an initial package and we're already printing the
- // error.
- pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg))
- } else {
- var s string
- for _, err := range imp.errs {
- s += "\n\t" + err.Error()
- }
- pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s))
- }
- return
- }
- }
- if pkg.PkgPath == "unsafe" {
- pkg.Types = types.Unsafe
- return
- }
-
- r.loadSem <- struct{}{}
- atomic.AddUint32(&r.stats.ActiveWorkers, 1)
- defer func() {
- <-r.loadSem
- atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0))
- }()
- if err := r.loadPkg(pkg, analyzers); err != nil {
- pkg.errs = append(pkg.errs, err)
- return
- }
-
- // A package's object facts is the union of all of its dependencies.
- for _, imp := range pkg.Imports {
- for ai, m := range imp.facts {
- for obj, facts := range m {
- pkg.facts[ai][obj] = facts[0:len(facts):len(facts)]
- }
- }
- }
-
- if !pkg.fromSource {
- // Nothing left to do for the package.
- return
- }
-
- // Run analyses on initial packages and those missing facts
- var wg sync.WaitGroup
- wg.Add(len(analyzers))
- errs := make([]error, len(analyzers))
- var acs []*analysisAction
- for i, a := range analyzers {
- i := i
- a := a
- ac := r.makeAnalysisAction(a, pkg)
- acs = append(acs, ac)
- go func() {
- defer wg.Done()
- // Only initial packages and packages with missing
- // facts will have been loaded from source.
- if pkg.initial || len(a.FactTypes) > 0 {
- if _, err := r.runAnalysis(ac); err != nil {
- errs[i] = analysisError{a, pkg, err}
- return
- }
- }
- }()
- }
- wg.Wait()
-
- depErrors := map[dependencyError]int{}
- for _, err := range errs {
- if err == nil {
- continue
- }
- switch err := err.(type) {
- case analysisError:
- switch err := err.err.(type) {
- case dependencyError:
- depErrors[err.nested()]++
- default:
- pkg.errs = append(pkg.errs, err)
- }
- default:
- pkg.errs = append(pkg.errs, err)
- }
- }
- for err, count := range depErrors {
- pkg.errs = append(pkg.errs,
- fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err))
- }
-
- // We can't process ignores at this point because `unused` needs
- // to see more than one package to make its decision.
- //
- // OPT(dh): can't we guard this block of code by pkg.initial?
- ignores, problems := parseDirectives(pkg.Package)
- pkg.ignores = append(pkg.ignores, ignores...)
- pkg.problems = append(pkg.problems, problems...)
- for _, ac := range acs {
- pkg.problems = append(pkg.problems, ac.problems...)
- }
-
- if pkg.initial {
- // Only initial packages have these analyzers run, and only
- // initial packages need these.
- if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil {
- pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config)
- }
- pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator)
- }
-
- // In a previous version of the code, we would throw away all type
- // information and reload it from export data. That was
- // nonsensical. The *types.Package doesn't keep any information
- // live that export data wouldn't also. We only need to discard
- // the AST and the TypesInfo maps; that happens after we return
- // from processPkg.
-}
-
-func parseDirective(s string) (cmd string, args []string) {
- if !strings.HasPrefix(s, "//lint:") {
- return "", nil
- }
- s = strings.TrimPrefix(s, "//lint:")
- fields := strings.Split(s, " ")
- return fields[0], fields[1:]
-}
-
-// parseDirectives extracts all linter directives from the source
-// files of the package. Malformed directives are returned as problems.
-func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) {
- var ignores []Ignore
- var problems []Problem
-
- for _, f := range pkg.Syntax {
- found := false
- commentLoop:
- for _, cg := range f.Comments {
- for _, c := range cg.List {
- if strings.Contains(c.Text, "//lint:") {
- found = true
- break commentLoop
- }
- }
- }
- if !found {
- continue
- }
- cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
- for node, cgs := range cm {
- for _, cg := range cgs {
- for _, c := range cg.List {
- if !strings.HasPrefix(c.Text, "//lint:") {
- continue
- }
- cmd, args := parseDirective(c.Text)
- switch cmd {
- case "ignore", "file-ignore":
- if len(args) < 2 {
- p := Problem{
- Pos: DisplayPosition(pkg.Fset, c.Pos()),
- Message: "malformed linter directive; missing the required reason field?",
- Severity: Error,
- Check: "compile",
- }
- problems = append(problems, p)
- continue
- }
- default:
- // unknown directive, ignore
- continue
- }
- checks := strings.Split(args[0], ",")
- pos := DisplayPosition(pkg.Fset, node.Pos())
- var ig Ignore
- switch cmd {
- case "ignore":
- ig = &LineIgnore{
- File: pos.Filename,
- Line: pos.Line,
- Checks: checks,
- Pos: DisplayPosition(pkg.Fset, c.Pos()),
- }
- case "file-ignore":
- ig = &FileIgnore{
- File: pos.Filename,
- Checks: checks,
- }
- }
- ignores = append(ignores, ig)
- }
- }
- }
- }
-
- return ignores, problems
-}
-
-// packageHash computes a package's hash. The hash is based on all Go
-// files that make up the package, as well as the hashes of imported
-// packages.
-func (r *Runner) packageHash(pkg *Package) (string, error) {
- key := cache.NewHash("package hash")
- fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
- fmt.Fprintf(key, "go %d\n", r.goVersion)
- for _, f := range pkg.CompiledGoFiles {
- h, err := cache.FileHash(f)
- if err != nil {
- return "", err
- }
- fmt.Fprintf(key, "file %s %x\n", f, h)
- }
-
- // Actually load the configuration to calculate its hash. This
- // will take into consideration inheritance of configuration
- // files, as well as the default configuration.
- //
- // OPT(dh): doing this means we'll load the config twice: once for
- // computing the hash, and once when analyzing the package from
- // source.
- cdir := config.Dir(pkg.GoFiles)
- if cdir == "" {
- fmt.Fprintf(key, "file %s %x\n", config.ConfigName, [cache.HashSize]byte{})
- } else {
- cfg, err := config.Load(cdir)
- if err != nil {
- return "", err
- }
- h := cache.NewHash(config.ConfigName)
- if _, err := h.Write([]byte(cfg.String())); err != nil {
- return "", err
- }
- fmt.Fprintf(key, "file %s %x\n", config.ConfigName, h.Sum())
- }
-
- imps := make([]*Package, len(pkg.Imports))
- copy(imps, pkg.Imports)
- sort.Slice(imps, func(i, j int) bool {
- return imps[i].PkgPath < imps[j].PkgPath
- })
- for _, dep := range imps {
- if dep.PkgPath == "unsafe" {
- continue
- }
-
- fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash)
- }
- h := key.Sum()
- return hex.EncodeToString(h[:]), nil
-}
-
-func packageActionID(pkg *Package) cache.ActionID {
- key := cache.NewHash("package ID")
- fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath)
- fmt.Fprintf(key, "pkghash %s\n", pkg.hash)
- return key.Sum()
-}
-
-// passActionID computes an ActionID for an analysis pass.
-func passActionID(pkg *Package, analyzer *analysis.Analyzer) cache.ActionID {
- return cache.Subkey(pkg.actionID, fmt.Sprintf("analyzer %s", analyzer.Name))
-}
diff --git a/vendor/honnef.co/go/tools/lint/stats.go b/vendor/honnef.co/go/tools/lint/stats.go
deleted file mode 100644
index 85eb9784489..00000000000
--- a/vendor/honnef.co/go/tools/lint/stats.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package lint
-
-import (
- "time"
-
- "golang.org/x/tools/go/analysis"
-)
-
-const (
- StateInitializing = 0
- StateGraph = 1
- StateProcessing = 2
- StateCumulative = 3
-)
-
-type Stats struct {
- State uint32
-
- InitialPackages uint32
- TotalPackages uint32
- ProcessedPackages uint32
- ProcessedInitialPackages uint32
- Problems uint32
- ActiveWorkers uint32
- TotalWorkers uint32
- PrintAnalyzerMeasurement func(*analysis.Analyzer, *Package, time.Duration)
-}
-
-type AnalysisMeasurementKey struct {
- Analysis string
- Pkg string
-}
-
-func (s *Stats) MeasureAnalyzer(analysis *analysis.Analyzer, pkg *Package, d time.Duration) {
- if s.PrintAnalyzerMeasurement != nil {
- s.PrintAnalyzerMeasurement(analysis, pkg, d)
- }
-}
diff --git a/vendor/honnef.co/go/tools/loader/loader.go b/vendor/honnef.co/go/tools/loader/loader.go
deleted file mode 100644
index a14f274d293..00000000000
--- a/vendor/honnef.co/go/tools/loader/loader.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package loader
-
-import (
- "errors"
- "fmt"
- "go/ast"
- "go/parser"
- "go/scanner"
- "go/token"
- "go/types"
- "log"
- "os"
-
- "golang.org/x/tools/go/gcexportdata"
- "golang.org/x/tools/go/packages"
-)
-
-// Graph resolves patterns and returns packages with all the
-// information required to later load type information, and optionally
-// syntax trees.
-//
-// The provided config can set any setting with the exception of Mode.
-func Graph(cfg packages.Config, patterns ...string) ([]*packages.Package, error) {
- cfg.Mode = packages.NeedName | packages.NeedImports | packages.NeedDeps | packages.NeedExportsFile | packages.NeedFiles | packages.NeedCompiledGoFiles | packages.NeedTypesSizes
- pkgs, err := packages.Load(&cfg, patterns...)
- if err != nil {
- return nil, err
- }
- fset := token.NewFileSet()
- packages.Visit(pkgs, nil, func(pkg *packages.Package) {
- pkg.Fset = fset
- })
-
- n := 0
- for _, pkg := range pkgs {
- if len(pkg.CompiledGoFiles) == 0 && len(pkg.Errors) == 0 && pkg.PkgPath != "unsafe" {
- // If a package consists only of test files, then
- // go/packages incorrectly(?) returns an empty package for
- // the non-test variant. Get rid of those packages. See
- // #646.
- //
- // Do not, however, skip packages that have errors. Those,
- // too, may have no files, but we want to print the
- // errors.
- continue
- }
- pkgs[n] = pkg
- n++
- }
- return pkgs[:n], nil
-}
-
-// LoadFromExport loads a package from export data. All of its
-// dependencies must have been loaded already.
-func LoadFromExport(pkg *packages.Package) error {
- pkg.IllTyped = true
- for path, pkg := range pkg.Imports {
- if pkg.Types == nil {
- return fmt.Errorf("dependency %q hasn't been loaded yet", path)
- }
- }
- if pkg.ExportFile == "" {
- return fmt.Errorf("no export data for %q", pkg.ID)
- }
- f, err := os.Open(pkg.ExportFile)
- if err != nil {
- return err
- }
- defer f.Close()
-
- r, err := gcexportdata.NewReader(f)
- if err != nil {
- return err
- }
-
- view := make(map[string]*types.Package) // view seen by gcexportdata
- seen := make(map[*packages.Package]bool) // all visited packages
- var visit func(pkgs map[string]*packages.Package)
- visit = func(pkgs map[string]*packages.Package) {
- for _, pkg := range pkgs {
- if !seen[pkg] {
- seen[pkg] = true
- view[pkg.PkgPath] = pkg.Types
- visit(pkg.Imports)
- }
- }
- }
- visit(pkg.Imports)
- tpkg, err := gcexportdata.Read(r, pkg.Fset, view, pkg.PkgPath)
- if err != nil {
- return err
- }
- pkg.Types = tpkg
- pkg.IllTyped = false
- return nil
-}
-
-// LoadFromSource loads a package from source. All of its dependencies
-// must have been loaded already.
-func LoadFromSource(pkg *packages.Package) error {
- pkg.IllTyped = true
- pkg.Types = types.NewPackage(pkg.PkgPath, pkg.Name)
-
- // OPT(dh): many packages have few files, much fewer than there
- // are CPU cores. Additionally, parsing each individual file is
- // very fast. A naive parallel implementation of this loop won't
- // be faster, and tends to be slower due to extra scheduling,
- // bookkeeping and potentially false sharing of cache lines.
- pkg.Syntax = make([]*ast.File, len(pkg.CompiledGoFiles))
- for i, file := range pkg.CompiledGoFiles {
- f, err := parser.ParseFile(pkg.Fset, file, nil, parser.ParseComments)
- if err != nil {
- pkg.Errors = append(pkg.Errors, convertError(err)...)
- return err
- }
- pkg.Syntax[i] = f
- }
- pkg.TypesInfo = &types.Info{
- Types: make(map[ast.Expr]types.TypeAndValue),
- Defs: make(map[*ast.Ident]types.Object),
- Uses: make(map[*ast.Ident]types.Object),
- Implicits: make(map[ast.Node]types.Object),
- Scopes: make(map[ast.Node]*types.Scope),
- Selections: make(map[*ast.SelectorExpr]*types.Selection),
- }
-
- importer := func(path string) (*types.Package, error) {
- if path == "unsafe" {
- return types.Unsafe, nil
- }
- if path == "C" {
- // go/packages doesn't tell us that cgo preprocessing
- // failed. When we subsequently try to parse the package,
- // we'll encounter the raw C import.
- return nil, errors.New("cgo preprocessing failed")
- }
- imp := pkg.Imports[path]
- if imp == nil {
- return nil, nil
- }
- if len(imp.Errors) > 0 {
- return nil, imp.Errors[0]
- }
- return imp.Types, nil
- }
- tc := &types.Config{
- Importer: importerFunc(importer),
- Error: func(err error) {
- pkg.Errors = append(pkg.Errors, convertError(err)...)
- },
- }
- err := types.NewChecker(tc, pkg.Fset, pkg.Types, pkg.TypesInfo).Files(pkg.Syntax)
- if err != nil {
- return err
- }
- pkg.IllTyped = false
- return nil
-}
-
-func convertError(err error) []packages.Error {
- var errs []packages.Error
- // taken from go/packages
- switch err := err.(type) {
- case packages.Error:
- // from driver
- errs = append(errs, err)
-
- case *os.PathError:
- // from parser
- errs = append(errs, packages.Error{
- Pos: err.Path + ":1",
- Msg: err.Err.Error(),
- Kind: packages.ParseError,
- })
-
- case scanner.ErrorList:
- // from parser
- for _, err := range err {
- errs = append(errs, packages.Error{
- Pos: err.Pos.String(),
- Msg: err.Msg,
- Kind: packages.ParseError,
- })
- }
-
- case types.Error:
- // from type checker
- errs = append(errs, packages.Error{
- Pos: err.Fset.Position(err.Pos).String(),
- Msg: err.Msg,
- Kind: packages.TypeError,
- })
-
- default:
- // unexpected impoverished error from parser?
- errs = append(errs, packages.Error{
- Pos: "-",
- Msg: err.Error(),
- Kind: packages.UnknownError,
- })
-
- // If you see this error message, please file a bug.
- log.Printf("internal error: error %q (%T) without position", err, err)
- }
- return errs
-}
-
-type importerFunc func(path string) (*types.Package, error)
-
-func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }
diff --git a/vendor/honnef.co/go/tools/pattern/convert.go b/vendor/honnef.co/go/tools/pattern/convert.go
deleted file mode 100644
index dfcd1560d74..00000000000
--- a/vendor/honnef.co/go/tools/pattern/convert.go
+++ /dev/null
@@ -1,242 +0,0 @@
-package pattern
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "reflect"
-)
-
-var astTypes = map[string]reflect.Type{
- "Ellipsis": reflect.TypeOf(ast.Ellipsis{}),
- "RangeStmt": reflect.TypeOf(ast.RangeStmt{}),
- "AssignStmt": reflect.TypeOf(ast.AssignStmt{}),
- "IndexExpr": reflect.TypeOf(ast.IndexExpr{}),
- "Ident": reflect.TypeOf(ast.Ident{}),
- "ValueSpec": reflect.TypeOf(ast.ValueSpec{}),
- "GenDecl": reflect.TypeOf(ast.GenDecl{}),
- "BinaryExpr": reflect.TypeOf(ast.BinaryExpr{}),
- "ForStmt": reflect.TypeOf(ast.ForStmt{}),
- "ArrayType": reflect.TypeOf(ast.ArrayType{}),
- "DeferStmt": reflect.TypeOf(ast.DeferStmt{}),
- "MapType": reflect.TypeOf(ast.MapType{}),
- "ReturnStmt": reflect.TypeOf(ast.ReturnStmt{}),
- "SliceExpr": reflect.TypeOf(ast.SliceExpr{}),
- "StarExpr": reflect.TypeOf(ast.StarExpr{}),
- "UnaryExpr": reflect.TypeOf(ast.UnaryExpr{}),
- "SendStmt": reflect.TypeOf(ast.SendStmt{}),
- "SelectStmt": reflect.TypeOf(ast.SelectStmt{}),
- "ImportSpec": reflect.TypeOf(ast.ImportSpec{}),
- "IfStmt": reflect.TypeOf(ast.IfStmt{}),
- "GoStmt": reflect.TypeOf(ast.GoStmt{}),
- "Field": reflect.TypeOf(ast.Field{}),
- "SelectorExpr": reflect.TypeOf(ast.SelectorExpr{}),
- "StructType": reflect.TypeOf(ast.StructType{}),
- "KeyValueExpr": reflect.TypeOf(ast.KeyValueExpr{}),
- "FuncType": reflect.TypeOf(ast.FuncType{}),
- "FuncLit": reflect.TypeOf(ast.FuncLit{}),
- "FuncDecl": reflect.TypeOf(ast.FuncDecl{}),
- "ChanType": reflect.TypeOf(ast.ChanType{}),
- "CallExpr": reflect.TypeOf(ast.CallExpr{}),
- "CaseClause": reflect.TypeOf(ast.CaseClause{}),
- "CommClause": reflect.TypeOf(ast.CommClause{}),
- "CompositeLit": reflect.TypeOf(ast.CompositeLit{}),
- "EmptyStmt": reflect.TypeOf(ast.EmptyStmt{}),
- "SwitchStmt": reflect.TypeOf(ast.SwitchStmt{}),
- "TypeSwitchStmt": reflect.TypeOf(ast.TypeSwitchStmt{}),
- "TypeAssertExpr": reflect.TypeOf(ast.TypeAssertExpr{}),
- "TypeSpec": reflect.TypeOf(ast.TypeSpec{}),
- "InterfaceType": reflect.TypeOf(ast.InterfaceType{}),
- "BranchStmt": reflect.TypeOf(ast.BranchStmt{}),
- "IncDecStmt": reflect.TypeOf(ast.IncDecStmt{}),
- "BasicLit": reflect.TypeOf(ast.BasicLit{}),
-}
-
-func ASTToNode(node interface{}) Node {
- switch node := node.(type) {
- case *ast.File:
- panic("cannot convert *ast.File to Node")
- case nil:
- return Nil{}
- case string:
- return String(node)
- case token.Token:
- return Token(node)
- case *ast.ExprStmt:
- return ASTToNode(node.X)
- case *ast.BlockStmt:
- if node == nil {
- return Nil{}
- }
- return ASTToNode(node.List)
- case *ast.FieldList:
- if node == nil {
- return Nil{}
- }
- return ASTToNode(node.List)
- case *ast.BasicLit:
- if node == nil {
- return Nil{}
- }
- case *ast.ParenExpr:
- return ASTToNode(node.X)
- }
-
- if node, ok := node.(ast.Node); ok {
- name := reflect.TypeOf(node).Elem().Name()
- T, ok := structNodes[name]
- if !ok {
- panic(fmt.Sprintf("internal error: unhandled type %T", node))
- }
-
- if reflect.ValueOf(node).IsNil() {
- return Nil{}
- }
- v := reflect.ValueOf(node).Elem()
- objs := make([]Node, T.NumField())
- for i := 0; i < T.NumField(); i++ {
- f := v.FieldByName(T.Field(i).Name)
- objs[i] = ASTToNode(f.Interface())
- }
-
- n, err := populateNode(name, objs, false)
- if err != nil {
- panic(fmt.Sprintf("internal error: %s", err))
- }
- return n
- }
-
- s := reflect.ValueOf(node)
- if s.Kind() == reflect.Slice {
- if s.Len() == 0 {
- return List{}
- }
- if s.Len() == 1 {
- return ASTToNode(s.Index(0).Interface())
- }
-
- tail := List{}
- for i := s.Len() - 1; i >= 0; i-- {
- head := ASTToNode(s.Index(i).Interface())
- l := List{
- Head: head,
- Tail: tail,
- }
- tail = l
- }
- return tail
- }
-
- panic(fmt.Sprintf("internal error: unhandled type %T", node))
-}
-
-func NodeToAST(node Node, state State) interface{} {
- switch node := node.(type) {
- case Binding:
- v, ok := state[node.Name]
- if !ok {
- // really we want to return an error here
- panic("XXX")
- }
- switch v := v.(type) {
- case types.Object:
- return &ast.Ident{Name: v.Name()}
- default:
- return v
- }
- case Builtin, Any, Object, Function, Not, Or:
- panic("XXX")
- case List:
- if (node == List{}) {
- return []ast.Node{}
- }
- x := []ast.Node{NodeToAST(node.Head, state).(ast.Node)}
- x = append(x, NodeToAST(node.Tail, state).([]ast.Node)...)
- return x
- case Token:
- return token.Token(node)
- case String:
- return string(node)
- case Nil:
- return nil
- }
-
- name := reflect.TypeOf(node).Name()
- T, ok := astTypes[name]
- if !ok {
- panic(fmt.Sprintf("internal error: unhandled type %T", node))
- }
- v := reflect.ValueOf(node)
- out := reflect.New(T)
- for i := 0; i < T.NumField(); i++ {
- fNode := v.FieldByName(T.Field(i).Name)
- if (fNode == reflect.Value{}) {
- continue
- }
- fAST := out.Elem().FieldByName(T.Field(i).Name)
- switch fAST.Type().Kind() {
- case reflect.Slice:
- c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state))
- if c.Kind() != reflect.Slice {
- // it's a single node in the pattern, we have to wrap
- // it in a slice
- slice := reflect.MakeSlice(fAST.Type(), 1, 1)
- slice.Index(0).Set(c)
- c = slice
- }
- switch fAST.Interface().(type) {
- case []ast.Node:
- switch cc := c.Interface().(type) {
- case []ast.Node:
- fAST.Set(c)
- case []ast.Expr:
- var slice []ast.Node
- for _, el := range cc {
- slice = append(slice, el)
- }
- fAST.Set(reflect.ValueOf(slice))
- default:
- panic("XXX")
- }
- case []ast.Expr:
- switch cc := c.Interface().(type) {
- case []ast.Node:
- var slice []ast.Expr
- for _, el := range cc {
- slice = append(slice, el.(ast.Expr))
- }
- fAST.Set(reflect.ValueOf(slice))
- case []ast.Expr:
- fAST.Set(c)
- default:
- panic("XXX")
- }
- default:
- panic("XXX")
- }
- case reflect.Int:
- c := reflect.ValueOf(NodeToAST(fNode.Interface().(Node), state))
- switch c.Kind() {
- case reflect.String:
- tok, ok := tokensByString[c.Interface().(string)]
- if !ok {
- // really we want to return an error here
- panic("XXX")
- }
- fAST.SetInt(int64(tok))
- case reflect.Int:
- fAST.Set(c)
- default:
- panic(fmt.Sprintf("internal error: unexpected kind %s", c.Kind()))
- }
- default:
- r := NodeToAST(fNode.Interface().(Node), state)
- if r != nil {
- fAST.Set(reflect.ValueOf(r))
- }
- }
- }
-
- return out.Interface().(ast.Node)
-}
diff --git a/vendor/honnef.co/go/tools/pattern/doc.go b/vendor/honnef.co/go/tools/pattern/doc.go
deleted file mode 100644
index 05d86c25144..00000000000
--- a/vendor/honnef.co/go/tools/pattern/doc.go
+++ /dev/null
@@ -1,273 +0,0 @@
-/*
-Package pattern implements a simple language for pattern matching Go ASTs.
-
-Design decisions and trade-offs
-
-The language is designed specifically for the task of filtering ASTs
-to simplify the implementation of analyses in staticcheck.
-It is also intended to be trivial to parse and execute.
-
-To that end, we make certain decisions that make the language more
-suited to its task, while making certain queries infeasible.
-
-Furthermore, it is fully expected that the majority of analyses will still require ordinary Go code
-to further process the filtered AST, to make use of type information and to enforce complex invariants.
-It is not our goal to design a scripting language for writing entire checks in.
-
-The language
-
-At its core, patterns are a representation of Go ASTs, allowing for the use of placeholders to enable pattern matching.
-Their syntax is inspired by LISP and Haskell, but unlike LISP, the core unit of patterns isn't the list, but the node.
-There is a fixed set of nodes, identified by name, and with the exception of the Or node, all nodes have a fixed number of arguments.
-In addition to nodes, there are atoms, which represent basic units such as strings or the nil value.
-
-Pattern matching is implemented via bindings, represented by the Binding node.
-A Binding can match nodes and associate them with names, to later recall the nodes.
-This allows for expressing "this node must be equal to that node" constraints.
-
-To simplify writing and reading patterns, a small amount of additional syntax exists on top of nodes and atoms.
-This additional syntax doesn't add any new features of its own, it simply provides shortcuts to creating nodes and atoms.
-
-To show an example of a pattern, first consider this snippet of Go code:
-
- if x := fn(); x != nil {
- for _, v := range x {
- println(v, x)
- }
- }
-
-The corresponding AST expressed as an idiomatic pattern would look as follows:
-
- (IfStmt
- (AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") []))
- (BinaryExpr (Ident "x") "!=" (Ident "nil"))
- (RangeStmt
- (Ident "_") (Ident "v") ":=" (Ident "x")
- (CallExpr (Ident "println") [(Ident "v") (Ident "x")]))
- nil)
-
-Two things are worth noting about this representation.
-First, the [el1 el2 ...] syntax is a short-hand for creating lists.
-It is a short-hand for el1:el2:[], which itself is a short-hand for (List el1 (List el2 (List nil nil)).
-Second, note the absence of a lot of lists in places that normally accept lists.
-For example, assignment assigns a number of right-hands to a number of left-hands, yet our AssignStmt is lacking any form of list.
-This is due to the fact that a single node can match a list of exactly one element.
-Thus, the two following forms have identical matching behavior:
-
- (AssignStmt (Ident "x") ":=" (CallExpr (Ident "fn") []))
- (AssignStmt [(Ident "x")] ":=" [(CallExpr (Ident "fn") [])])
-
-This section serves as an overview of the language's syntax.
-More in-depth explanations of the matching behavior as well as an exhaustive list of node types follows in the coming sections.
-
-Pattern matching
-
-TODO write about pattern matching
-
-- inspired by haskell syntax, but much, much simpler and naive
-
-Node types
-
-The language contains two kinds of nodes: those that map to nodes in the AST, and those that implement additional logic.
-
-Nodes that map directly to AST nodes are named identically to the types in the go/ast package.
-What follows is an exhaustive list of these nodes:
-
- (ArrayType len elt)
- (AssignStmt lhs tok rhs)
- (BasicLit kind value)
- (BinaryExpr x op y)
- (BranchStmt tok label)
- (CallExpr fun args)
- (CaseClause list body)
- (ChanType dir value)
- (CommClause comm body)
- (CompositeLit type elts)
- (DeferStmt call)
- (Ellipsis elt)
- (EmptyStmt)
- (Field names type tag)
- (ForStmt init cond post body)
- (FuncDecl recv name type body)
- (FuncLit type body)
- (FuncType params results)
- (GenDecl specs)
- (GoStmt call)
- (Ident name)
- (IfStmt init cond body else)
- (ImportSpec name path)
- (IncDecStmt x tok)
- (IndexExpr x index)
- (InterfaceType methods)
- (KeyValueExpr key value)
- (MapType key value)
- (RangeStmt key value tok x body)
- (ReturnStmt results)
- (SelectStmt body)
- (SelectorExpr x sel)
- (SendStmt chan value)
- (SliceExpr x low high max)
- (StarExpr x)
- (StructType fields)
- (SwitchStmt init tag body)
- (TypeAssertExpr)
- (TypeSpec name type)
- (TypeSwitchStmt init assign body)
- (UnaryExpr op x)
- (ValueSpec names type values)
-
-Additionally, there are the String, Token and nil atoms.
-Strings are double-quoted string literals, as in (Ident "someName").
-Tokens are also represented as double-quoted string literals, but are converted to token.Token values in contexts that require tokens,
-such as in (BinaryExpr x "<" y), where "<" is transparently converted to token.LSS during matching.
-The keyword 'nil' denotes the nil value, which represents the absence of any value.
-
-We also defines the (List head tail) node, which is used to represent sequences of elements as a singly linked list.
-The head is a single element, and the tail is the remainder of the list.
-For example,
-
- (List "foo" (List "bar" (List "baz" (List nil nil))))
-
-represents a list of three elements, "foo", "bar" and "baz". There is dedicated syntax for writing lists, which looks as follows:
-
- ["foo" "bar" "baz"]
-
-This syntax is itself syntactic sugar for the following form:
-
- "foo":"bar":"baz":[]
-
-This form is of particular interest for pattern matching, as it allows matching on the head and tail. For example,
-
- "foo":"bar":_
-
-would match any list with at least two elements, where the first two elements are "foo" and "bar". This is equivalent to writing
-
- (List "foo" (List "bar" _))
-
-Note that it is not possible to match from the end of the list.
-That is, there is no way to express a query such as "a list of any length where the last element is foo".
-
-Note that unlike in LISP, nil and empty lists are distinct from one another.
-In patterns, with respect to lists, nil is akin to Go's untyped nil.
-It will match a nil ast.Node, but it will not match a nil []ast.Expr. Nil will, however, match pointers to named types such as *ast.Ident.
-Similarly, lists are akin to Go's
-slices. An empty list will match both a nil and an empty []ast.Expr, but it will not match a nil ast.Node.
-
-Due to the difference between nil and empty lists, an empty list is represented as (List nil nil), i.e. a list with no head or tail.
-Similarly, a list of one element is represented as (List el (List nil nil)). Unlike in LISP, it cannot be represented by (List el nil).
-
-Finally, there are nodes that implement special logic or matching behavior.
-
-(Any) matches any value. The underscore (_) maps to this node, making the following two forms equivalent:
-
- (Ident _)
- (Ident (Any))
-
-(Builtin name) matches a built-in identifier or function by name.
-This is a type-aware variant of (Ident name).
-Instead of only comparing the name, it resolves the object behind the name and makes sure it's a pre-declared identifier.
-
-For example, in the following piece of code
-
- func fn() {
- println(true)
- true := false
- println(true)
- }
-
-the pattern
-
- (Builtin "true")
-
-will match exactly once, on the first use of 'true' in the function.
-Subsequent occurrences of 'true' no longer refer to the pre-declared identifier.
-
-(Object name) matches an identifier by name, but yields the
-types.Object it refers to.
-
-(Function name) matches ast.Idents and ast.SelectorExprs that refer to a function with a given fully qualified name.
-For example, "net/url.PathEscape" matches the PathEscape function in the net/url package,
-and "(net/url.EscapeError).Error" refers to the Error method on the net/url.EscapeError type,
-either on an instance of the type, or on the type itself.
-
-For example, the following patterns match the following lines of code:
-
- (CallExpr (Function "fmt.Println") _) // pattern 1
- (CallExpr (Function "(net/url.EscapeError).Error") _) // pattern 2
-
- fmt.Println("hello, world") // matches pattern 1
- var x url.EscapeError
- x.Error() // matches pattern 2
- (url.EscapeError).Error(x) // also matches pattern 2
-
-(Binding name node) creates or uses a binding.
-Bindings work like variable assignments, allowing referring to already matched nodes.
-As an example, bindings are necessary to match self-assignment of the form "x = x",
-since we need to express that the right-hand side is identical to the left-hand side.
-
-If a binding's node is not nil, the matcher will attempt to match a node according to the pattern.
-If a binding's node is nil, the binding will either recall an existing value, or match the Any node.
-It is an error to provide a non-nil node to a binding that has already been bound.
-
-Referring back to the earlier example, the following pattern will match self-assignment of idents:
-
- (AssignStmt (Binding "lhs" (Ident _)) "=" (Binding "lhs" nil))
-
-Because bindings are a crucial component of pattern matching, there is special syntax for creating and recalling bindings.
-Lower-case names refer to bindings. If standing on its own, the name "foo" will be equivalent to (Binding "foo" nil).
-If a name is followed by an at-sign (@) then it will create a binding for the node that follows.
-Together, this allows us to rewrite the earlier example as follows:
-
- (AssignStmt lhs@(Ident _) "=" lhs)
-
-(Or nodes...) is a variadic node that tries matching each node until one succeeds. For example, the following pattern matches all idents of name "foo" or "bar":
-
- (Ident (Or "foo" "bar"))
-
-We could also have written
-
- (Or (Ident "foo") (Ident "bar"))
-
-and achieved the same result. We can also mix different kinds of nodes:
-
- (Or (Ident "foo") (CallExpr (Ident "bar") _))
-
-When using bindings inside of nodes used inside Or, all or none of the bindings will be bound.
-That is, partially matched nodes that ultimately failed to match will not produce any bindings observable outside of the matching attempt.
-We can thus write
-
- (Or (Ident name) (CallExpr name))
-
-and 'name' will either be a String if the first option matched, or an Ident or SelectorExpr if the second option matched.
-
-(Not node)
-
-The Not node negates a match. For example, (Not (Ident _)) will match all nodes that aren't identifiers.
-
-ChanDir(0)
-
-Automatic unnesting of AST nodes
-
-The Go AST has several types of nodes that wrap other nodes.
-To simplify matching, we automatically unwrap some of these nodes.
-
-These nodes are ExprStmt (for using expressions in a statement context),
-ParenExpr (for parenthesized expressions),
-DeclStmt (for declarations in a statement context),
-and LabeledStmt (for labeled statements).
-
-Thus, the query
-
- (FuncLit _ [(CallExpr _ _)]
-
-will match a function literal containing a single function call,
-even though in the actual Go AST, the CallExpr is nested inside an ExprStmt,
-as function bodies are made up of sequences of statements.
-
-On the flip-side, there is no way to specifically match these wrapper nodes.
-For example, there is no way of searching for unnecessary parentheses, like in the following piece of Go code:
-
- ((x)) += 2
-
-*/
-package pattern
diff --git a/vendor/honnef.co/go/tools/pattern/fuzz.go b/vendor/honnef.co/go/tools/pattern/fuzz.go
deleted file mode 100644
index 52e7df9742b..00000000000
--- a/vendor/honnef.co/go/tools/pattern/fuzz.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// +build gofuzz
-
-package pattern
-
-import (
- "go/ast"
- goparser "go/parser"
- "go/token"
- "os"
- "path/filepath"
- "strings"
-)
-
-var files []*ast.File
-
-func init() {
- fset := token.NewFileSet()
- filepath.Walk("/usr/lib/go/src", func(path string, info os.FileInfo, err error) error {
- if err != nil {
- // XXX error handling
- panic(err)
- }
- if !strings.HasSuffix(path, ".go") {
- return nil
- }
- f, err := goparser.ParseFile(fset, path, nil, 0)
- if err != nil {
- return nil
- }
- files = append(files, f)
- return nil
- })
-}
-
-func Fuzz(data []byte) int {
- p := &Parser{}
- pat, err := p.Parse(string(data))
- if err != nil {
- if strings.Contains(err.Error(), "internal error") {
- panic(err)
- }
- return 0
- }
- _ = pat.Root.String()
-
- for _, f := range files {
- Match(pat.Root, f)
- }
- return 1
-}
diff --git a/vendor/honnef.co/go/tools/pattern/lexer.go b/vendor/honnef.co/go/tools/pattern/lexer.go
deleted file mode 100644
index fb72e392bde..00000000000
--- a/vendor/honnef.co/go/tools/pattern/lexer.go
+++ /dev/null
@@ -1,221 +0,0 @@
-package pattern
-
-import (
- "fmt"
- "go/token"
- "unicode"
- "unicode/utf8"
-)
-
-type lexer struct {
- f *token.File
-
- input string
- start int
- pos int
- width int
- items chan item
-}
-
-type itemType int
-
-const eof = -1
-
-const (
- itemError itemType = iota
- itemLeftParen
- itemRightParen
- itemLeftBracket
- itemRightBracket
- itemTypeName
- itemVariable
- itemAt
- itemColon
- itemBlank
- itemString
- itemEOF
-)
-
-func (typ itemType) String() string {
- switch typ {
- case itemError:
- return "ERROR"
- case itemLeftParen:
- return "("
- case itemRightParen:
- return ")"
- case itemLeftBracket:
- return "["
- case itemRightBracket:
- return "]"
- case itemTypeName:
- return "TYPE"
- case itemVariable:
- return "VAR"
- case itemAt:
- return "@"
- case itemColon:
- return ":"
- case itemBlank:
- return "_"
- case itemString:
- return "STRING"
- case itemEOF:
- return "EOF"
- default:
- return fmt.Sprintf("itemType(%d)", typ)
- }
-}
-
-type item struct {
- typ itemType
- val string
- pos int
-}
-
-type stateFn func(*lexer) stateFn
-
-func (l *lexer) run() {
- for state := lexStart; state != nil; {
- state = state(l)
- }
- close(l.items)
-}
-
-func (l *lexer) emitValue(t itemType, value string) {
- l.items <- item{t, value, l.start}
- l.start = l.pos
-}
-
-func (l *lexer) emit(t itemType) {
- l.items <- item{t, l.input[l.start:l.pos], l.start}
- l.start = l.pos
-}
-
-func lexStart(l *lexer) stateFn {
- switch r := l.next(); {
- case r == eof:
- l.emit(itemEOF)
- return nil
- case unicode.IsSpace(r):
- l.ignore()
- case r == '(':
- l.emit(itemLeftParen)
- case r == ')':
- l.emit(itemRightParen)
- case r == '[':
- l.emit(itemLeftBracket)
- case r == ']':
- l.emit(itemRightBracket)
- case r == '@':
- l.emit(itemAt)
- case r == ':':
- l.emit(itemColon)
- case r == '_':
- l.emit(itemBlank)
- case r == '"':
- l.backup()
- return lexString
- case unicode.IsUpper(r):
- l.backup()
- return lexType
- case unicode.IsLower(r):
- l.backup()
- return lexVariable
- default:
- return l.errorf("unexpected character %c", r)
- }
- return lexStart
-}
-
-func (l *lexer) next() (r rune) {
- if l.pos >= len(l.input) {
- l.width = 0
- return eof
- }
- r, l.width = utf8.DecodeRuneInString(l.input[l.pos:])
-
- if r == '\n' {
- l.f.AddLine(l.pos)
- }
-
- l.pos += l.width
-
- return r
-}
-
-func (l *lexer) ignore() {
- l.start = l.pos
-}
-
-func (l *lexer) backup() {
- l.pos -= l.width
-}
-
-func (l *lexer) errorf(format string, args ...interface{}) stateFn {
- // TODO(dh): emit position information in errors
- l.items <- item{
- itemError,
- fmt.Sprintf(format, args...),
- l.start,
- }
- return nil
-}
-
-func isAlphaNumeric(r rune) bool {
- return r >= '0' && r <= '9' ||
- r >= 'a' && r <= 'z' ||
- r >= 'A' && r <= 'Z'
-}
-
-func lexString(l *lexer) stateFn {
- l.next() // skip quote
- escape := false
-
- var runes []rune
- for {
- switch r := l.next(); r {
- case eof:
- return l.errorf("unterminated string")
- case '"':
- if !escape {
- l.emitValue(itemString, string(runes))
- return lexStart
- } else {
- runes = append(runes, '"')
- escape = false
- }
- case '\\':
- if escape {
- runes = append(runes, '\\')
- escape = false
- } else {
- escape = true
- }
- default:
- runes = append(runes, r)
- }
- }
-}
-
-func lexType(l *lexer) stateFn {
- l.next()
- for {
- if !isAlphaNumeric(l.next()) {
- l.backup()
- l.emit(itemTypeName)
- return lexStart
- }
- }
-}
-
-func lexVariable(l *lexer) stateFn {
- l.next()
- for {
- if !isAlphaNumeric(l.next()) {
- l.backup()
- l.emit(itemVariable)
- return lexStart
- }
- }
-}
diff --git a/vendor/honnef.co/go/tools/pattern/match.go b/vendor/honnef.co/go/tools/pattern/match.go
deleted file mode 100644
index ff039baa75d..00000000000
--- a/vendor/honnef.co/go/tools/pattern/match.go
+++ /dev/null
@@ -1,513 +0,0 @@
-package pattern
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "reflect"
-
- "honnef.co/go/tools/lint"
-)
-
-var tokensByString = map[string]Token{
- "INT": Token(token.INT),
- "FLOAT": Token(token.FLOAT),
- "IMAG": Token(token.IMAG),
- "CHAR": Token(token.CHAR),
- "STRING": Token(token.STRING),
- "+": Token(token.ADD),
- "-": Token(token.SUB),
- "*": Token(token.MUL),
- "/": Token(token.QUO),
- "%": Token(token.REM),
- "&": Token(token.AND),
- "|": Token(token.OR),
- "^": Token(token.XOR),
- "<<": Token(token.SHL),
- ">>": Token(token.SHR),
- "&^": Token(token.AND_NOT),
- "+=": Token(token.ADD_ASSIGN),
- "-=": Token(token.SUB_ASSIGN),
- "*=": Token(token.MUL_ASSIGN),
- "/=": Token(token.QUO_ASSIGN),
- "%=": Token(token.REM_ASSIGN),
- "&=": Token(token.AND_ASSIGN),
- "|=": Token(token.OR_ASSIGN),
- "^=": Token(token.XOR_ASSIGN),
- "<<=": Token(token.SHL_ASSIGN),
- ">>=": Token(token.SHR_ASSIGN),
- "&^=": Token(token.AND_NOT_ASSIGN),
- "&&": Token(token.LAND),
- "||": Token(token.LOR),
- "<-": Token(token.ARROW),
- "++": Token(token.INC),
- "--": Token(token.DEC),
- "==": Token(token.EQL),
- "<": Token(token.LSS),
- ">": Token(token.GTR),
- "=": Token(token.ASSIGN),
- "!": Token(token.NOT),
- "!=": Token(token.NEQ),
- "<=": Token(token.LEQ),
- ">=": Token(token.GEQ),
- ":=": Token(token.DEFINE),
- "...": Token(token.ELLIPSIS),
- "IMPORT": Token(token.IMPORT),
- "VAR": Token(token.VAR),
- "TYPE": Token(token.TYPE),
- "CONST": Token(token.CONST),
-}
-
-func maybeToken(node Node) (Node, bool) {
- if node, ok := node.(String); ok {
- if tok, ok := tokensByString[string(node)]; ok {
- return tok, true
- }
- return node, false
- }
- return node, false
-}
-
-func isNil(v interface{}) bool {
- if v == nil {
- return true
- }
- if _, ok := v.(Nil); ok {
- return true
- }
- return false
-}
-
-type matcher interface {
- Match(*Matcher, interface{}) (interface{}, bool)
-}
-
-type State = map[string]interface{}
-
-type Matcher struct {
- TypesInfo *types.Info
- State State
-}
-
-func (m *Matcher) fork() *Matcher {
- state := make(State, len(m.State))
- for k, v := range m.State {
- state[k] = v
- }
- return &Matcher{
- TypesInfo: m.TypesInfo,
- State: state,
- }
-}
-
-func (m *Matcher) merge(mc *Matcher) {
- m.State = mc.State
-}
-
-func (m *Matcher) Match(a Node, b ast.Node) bool {
- m.State = State{}
- _, ok := match(m, a, b)
- return ok
-}
-
-func Match(a Node, b ast.Node) (*Matcher, bool) {
- m := &Matcher{}
- ret := m.Match(a, b)
- return m, ret
-}
-
-// Match two items, which may be (Node, AST) or (AST, AST)
-func match(m *Matcher, l, r interface{}) (interface{}, bool) {
- if _, ok := r.(Node); ok {
- panic("Node mustn't be on right side of match")
- }
-
- switch l := l.(type) {
- case *ast.ParenExpr:
- return match(m, l.X, r)
- case *ast.ExprStmt:
- return match(m, l.X, r)
- case *ast.DeclStmt:
- return match(m, l.Decl, r)
- case *ast.LabeledStmt:
- return match(m, l.Stmt, r)
- case *ast.BlockStmt:
- return match(m, l.List, r)
- case *ast.FieldList:
- return match(m, l.List, r)
- }
-
- switch r := r.(type) {
- case *ast.ParenExpr:
- return match(m, l, r.X)
- case *ast.ExprStmt:
- return match(m, l, r.X)
- case *ast.DeclStmt:
- return match(m, l, r.Decl)
- case *ast.LabeledStmt:
- return match(m, l, r.Stmt)
- case *ast.BlockStmt:
- if r == nil {
- return match(m, l, nil)
- }
- return match(m, l, r.List)
- case *ast.FieldList:
- if r == nil {
- return match(m, l, nil)
- }
- return match(m, l, r.List)
- case *ast.BasicLit:
- if r == nil {
- return match(m, l, nil)
- }
- }
-
- if l, ok := l.(matcher); ok {
- return l.Match(m, r)
- }
-
- if l, ok := l.(Node); ok {
- // Matching of pattern with concrete value
- return matchNodeAST(m, l, r)
- }
-
- if l == nil || r == nil {
- return nil, l == r
- }
-
- {
- ln, ok1 := l.(ast.Node)
- rn, ok2 := r.(ast.Node)
- if ok1 && ok2 {
- return matchAST(m, ln, rn)
- }
- }
-
- {
- obj, ok := l.(types.Object)
- if ok {
- switch r := r.(type) {
- case *ast.Ident:
- return obj, obj == m.TypesInfo.ObjectOf(r)
- case *ast.SelectorExpr:
- return obj, obj == m.TypesInfo.ObjectOf(r.Sel)
- default:
- return obj, false
- }
- }
- }
-
- {
- ln, ok1 := l.([]ast.Expr)
- rn, ok2 := r.([]ast.Expr)
- if ok1 || ok2 {
- if ok1 && !ok2 {
- rn = []ast.Expr{r.(ast.Expr)}
- } else if !ok1 && ok2 {
- ln = []ast.Expr{l.(ast.Expr)}
- }
-
- if len(ln) != len(rn) {
- return nil, false
- }
- for i, ll := range ln {
- if _, ok := match(m, ll, rn[i]); !ok {
- return nil, false
- }
- }
- return r, true
- }
- }
-
- {
- ln, ok1 := l.([]ast.Stmt)
- rn, ok2 := r.([]ast.Stmt)
- if ok1 || ok2 {
- if ok1 && !ok2 {
- rn = []ast.Stmt{r.(ast.Stmt)}
- } else if !ok1 && ok2 {
- ln = []ast.Stmt{l.(ast.Stmt)}
- }
-
- if len(ln) != len(rn) {
- return nil, false
- }
- for i, ll := range ln {
- if _, ok := match(m, ll, rn[i]); !ok {
- return nil, false
- }
- }
- return r, true
- }
- }
-
- panic(fmt.Sprintf("unsupported comparison: %T and %T", l, r))
-}
-
-// Match a Node with an AST node
-func matchNodeAST(m *Matcher, a Node, b interface{}) (interface{}, bool) {
- switch b := b.(type) {
- case []ast.Stmt:
- // 'a' is not a List or we'd be using its Match
- // implementation.
-
- if len(b) != 1 {
- return nil, false
- }
- return match(m, a, b[0])
- case []ast.Expr:
- // 'a' is not a List or we'd be using its Match
- // implementation.
-
- if len(b) != 1 {
- return nil, false
- }
- return match(m, a, b[0])
- case ast.Node:
- ra := reflect.ValueOf(a)
- rb := reflect.ValueOf(b).Elem()
-
- if ra.Type().Name() != rb.Type().Name() {
- return nil, false
- }
-
- for i := 0; i < ra.NumField(); i++ {
- af := ra.Field(i)
- fieldName := ra.Type().Field(i).Name
- bf := rb.FieldByName(fieldName)
- if (bf == reflect.Value{}) {
- panic(fmt.Sprintf("internal error: could not find field %s in type %t when comparing with %T", fieldName, b, a))
- }
- ai := af.Interface()
- bi := bf.Interface()
- if ai == nil {
- return b, bi == nil
- }
- if _, ok := match(m, ai.(Node), bi); !ok {
- return b, false
- }
- }
- return b, true
- case nil:
- return nil, a == Nil{}
- default:
- panic(fmt.Sprintf("unhandled type %T", b))
- }
-}
-
-// Match two AST nodes
-func matchAST(m *Matcher, a, b ast.Node) (interface{}, bool) {
- ra := reflect.ValueOf(a)
- rb := reflect.ValueOf(b)
-
- if ra.Type() != rb.Type() {
- return nil, false
- }
- if ra.IsNil() || rb.IsNil() {
- return rb, ra.IsNil() == rb.IsNil()
- }
-
- ra = ra.Elem()
- rb = rb.Elem()
- for i := 0; i < ra.NumField(); i++ {
- af := ra.Field(i)
- bf := rb.Field(i)
- if af.Type() == rtTokPos || af.Type() == rtObject || af.Type() == rtCommentGroup {
- continue
- }
-
- switch af.Kind() {
- case reflect.Slice:
- if af.Len() != bf.Len() {
- return nil, false
- }
- for j := 0; j < af.Len(); j++ {
- if _, ok := match(m, af.Index(j).Interface().(ast.Node), bf.Index(j).Interface().(ast.Node)); !ok {
- return nil, false
- }
- }
- case reflect.String:
- if af.String() != bf.String() {
- return nil, false
- }
- case reflect.Int:
- if af.Int() != bf.Int() {
- return nil, false
- }
- case reflect.Bool:
- if af.Bool() != bf.Bool() {
- return nil, false
- }
- case reflect.Ptr, reflect.Interface:
- if _, ok := match(m, af.Interface(), bf.Interface()); !ok {
- return nil, false
- }
- default:
- panic(fmt.Sprintf("internal error: unhandled kind %s (%T)", af.Kind(), af.Interface()))
- }
- }
- return b, true
-}
-
-func (b Binding) Match(m *Matcher, node interface{}) (interface{}, bool) {
- if isNil(b.Node) {
- v, ok := m.State[b.Name]
- if ok {
- // Recall value
- return match(m, v, node)
- }
- // Matching anything
- b.Node = Any{}
- }
-
- // Store value
- if _, ok := m.State[b.Name]; ok {
- panic(fmt.Sprintf("binding already created: %s", b.Name))
- }
- new, ret := match(m, b.Node, node)
- if ret {
- m.State[b.Name] = new
- }
- return new, ret
-}
-
-func (Any) Match(m *Matcher, node interface{}) (interface{}, bool) {
- return node, true
-}
-
-func (l List) Match(m *Matcher, node interface{}) (interface{}, bool) {
- v := reflect.ValueOf(node)
- if v.Kind() == reflect.Slice {
- if isNil(l.Head) {
- return node, v.Len() == 0
- }
- if v.Len() == 0 {
- return nil, false
- }
- // OPT(dh): don't check the entire tail if head didn't match
- _, ok1 := match(m, l.Head, v.Index(0).Interface())
- _, ok2 := match(m, l.Tail, v.Slice(1, v.Len()).Interface())
- return node, ok1 && ok2
- }
- // Our empty list does not equal an untyped Go nil. This way, we can
- // tell apart an if with no else and an if with an empty else.
- return nil, false
-}
-
-func (s String) Match(m *Matcher, node interface{}) (interface{}, bool) {
- switch o := node.(type) {
- case token.Token:
- if tok, ok := maybeToken(s); ok {
- return match(m, tok, node)
- }
- return nil, false
- case string:
- return o, string(s) == o
- default:
- return nil, false
- }
-}
-
-func (tok Token) Match(m *Matcher, node interface{}) (interface{}, bool) {
- o, ok := node.(token.Token)
- if !ok {
- return nil, false
- }
- return o, token.Token(tok) == o
-}
-
-func (Nil) Match(m *Matcher, node interface{}) (interface{}, bool) {
- return nil, isNil(node)
-}
-
-func (builtin Builtin) Match(m *Matcher, node interface{}) (interface{}, bool) {
- ident, ok := node.(*ast.Ident)
- if !ok {
- return nil, false
- }
- obj := m.TypesInfo.ObjectOf(ident)
- if obj != types.Universe.Lookup(ident.Name) {
- return nil, false
- }
- return match(m, builtin.Name, ident.Name)
-}
-
-func (obj Object) Match(m *Matcher, node interface{}) (interface{}, bool) {
- ident, ok := node.(*ast.Ident)
- if !ok {
- return nil, false
- }
-
- id := m.TypesInfo.ObjectOf(ident)
- _, ok = match(m, obj.Name, ident.Name)
- return id, ok
-}
-
-func (fn Function) Match(m *Matcher, node interface{}) (interface{}, bool) {
- var name string
- var obj types.Object
- switch node := node.(type) {
- case *ast.Ident:
- obj = m.TypesInfo.ObjectOf(node)
- switch obj := obj.(type) {
- case *types.Func:
- name = lint.FuncName(obj)
- case *types.Builtin:
- name = obj.Name()
- default:
- return nil, false
- }
- case *ast.SelectorExpr:
- var ok bool
- obj, ok = m.TypesInfo.ObjectOf(node.Sel).(*types.Func)
- if !ok {
- return nil, false
- }
- name = lint.FuncName(obj.(*types.Func))
- default:
- return nil, false
- }
- _, ok := match(m, fn.Name, name)
- return obj, ok
-}
-
-func (or Or) Match(m *Matcher, node interface{}) (interface{}, bool) {
- for _, opt := range or.Nodes {
- mc := m.fork()
- if ret, ok := match(mc, opt, node); ok {
- m.merge(mc)
- return ret, true
- }
- }
- return nil, false
-}
-
-func (not Not) Match(m *Matcher, node interface{}) (interface{}, bool) {
- _, ok := match(m, not.Node, node)
- if ok {
- return nil, false
- }
- return node, true
-}
-
-var (
- // Types of fields in go/ast structs that we want to skip
- rtTokPos = reflect.TypeOf(token.Pos(0))
- rtObject = reflect.TypeOf((*ast.Object)(nil))
- rtCommentGroup = reflect.TypeOf((*ast.CommentGroup)(nil))
-)
-
-var (
- _ matcher = Binding{}
- _ matcher = Any{}
- _ matcher = List{}
- _ matcher = String("")
- _ matcher = Token(0)
- _ matcher = Nil{}
- _ matcher = Builtin{}
- _ matcher = Object{}
- _ matcher = Function{}
- _ matcher = Or{}
- _ matcher = Not{}
-)
diff --git a/vendor/honnef.co/go/tools/pattern/parser.go b/vendor/honnef.co/go/tools/pattern/parser.go
deleted file mode 100644
index 009238b8608..00000000000
--- a/vendor/honnef.co/go/tools/pattern/parser.go
+++ /dev/null
@@ -1,455 +0,0 @@
-package pattern
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "reflect"
-)
-
-type Pattern struct {
- Root Node
- // Relevant contains instances of ast.Node that could potentially
- // initiate a successful match of the pattern.
- Relevant []reflect.Type
-}
-
-func MustParse(s string) Pattern {
- p := &Parser{AllowTypeInfo: true}
- pat, err := p.Parse(s)
- if err != nil {
- panic(err)
- }
- return pat
-}
-
-func roots(node Node) []reflect.Type {
- switch node := node.(type) {
- case Or:
- var out []reflect.Type
- for _, el := range node.Nodes {
- out = append(out, roots(el)...)
- }
- return out
- case Not:
- return roots(node.Node)
- case Binding:
- return roots(node.Node)
- case Nil, nil:
- // this branch is reached via bindings
- return allTypes
- default:
- Ts, ok := nodeToASTTypes[reflect.TypeOf(node)]
- if !ok {
- panic(fmt.Sprintf("internal error: unhandled type %T", node))
- }
- return Ts
- }
-}
-
-var allTypes = []reflect.Type{
- reflect.TypeOf((*ast.RangeStmt)(nil)),
- reflect.TypeOf((*ast.AssignStmt)(nil)),
- reflect.TypeOf((*ast.IndexExpr)(nil)),
- reflect.TypeOf((*ast.Ident)(nil)),
- reflect.TypeOf((*ast.ValueSpec)(nil)),
- reflect.TypeOf((*ast.GenDecl)(nil)),
- reflect.TypeOf((*ast.BinaryExpr)(nil)),
- reflect.TypeOf((*ast.ForStmt)(nil)),
- reflect.TypeOf((*ast.ArrayType)(nil)),
- reflect.TypeOf((*ast.DeferStmt)(nil)),
- reflect.TypeOf((*ast.MapType)(nil)),
- reflect.TypeOf((*ast.ReturnStmt)(nil)),
- reflect.TypeOf((*ast.SliceExpr)(nil)),
- reflect.TypeOf((*ast.StarExpr)(nil)),
- reflect.TypeOf((*ast.UnaryExpr)(nil)),
- reflect.TypeOf((*ast.SendStmt)(nil)),
- reflect.TypeOf((*ast.SelectStmt)(nil)),
- reflect.TypeOf((*ast.ImportSpec)(nil)),
- reflect.TypeOf((*ast.IfStmt)(nil)),
- reflect.TypeOf((*ast.GoStmt)(nil)),
- reflect.TypeOf((*ast.Field)(nil)),
- reflect.TypeOf((*ast.SelectorExpr)(nil)),
- reflect.TypeOf((*ast.StructType)(nil)),
- reflect.TypeOf((*ast.KeyValueExpr)(nil)),
- reflect.TypeOf((*ast.FuncType)(nil)),
- reflect.TypeOf((*ast.FuncLit)(nil)),
- reflect.TypeOf((*ast.FuncDecl)(nil)),
- reflect.TypeOf((*ast.ChanType)(nil)),
- reflect.TypeOf((*ast.CallExpr)(nil)),
- reflect.TypeOf((*ast.CaseClause)(nil)),
- reflect.TypeOf((*ast.CommClause)(nil)),
- reflect.TypeOf((*ast.CompositeLit)(nil)),
- reflect.TypeOf((*ast.EmptyStmt)(nil)),
- reflect.TypeOf((*ast.SwitchStmt)(nil)),
- reflect.TypeOf((*ast.TypeSwitchStmt)(nil)),
- reflect.TypeOf((*ast.TypeAssertExpr)(nil)),
- reflect.TypeOf((*ast.TypeSpec)(nil)),
- reflect.TypeOf((*ast.InterfaceType)(nil)),
- reflect.TypeOf((*ast.BranchStmt)(nil)),
- reflect.TypeOf((*ast.IncDecStmt)(nil)),
- reflect.TypeOf((*ast.BasicLit)(nil)),
-}
-
-var nodeToASTTypes = map[reflect.Type][]reflect.Type{
- reflect.TypeOf(String("")): nil,
- reflect.TypeOf(Token(0)): nil,
- reflect.TypeOf(List{}): {reflect.TypeOf((*ast.BlockStmt)(nil)), reflect.TypeOf((*ast.FieldList)(nil))},
- reflect.TypeOf(Builtin{}): {reflect.TypeOf((*ast.Ident)(nil))},
- reflect.TypeOf(Object{}): {reflect.TypeOf((*ast.Ident)(nil))},
- reflect.TypeOf(Function{}): {reflect.TypeOf((*ast.Ident)(nil)), reflect.TypeOf((*ast.SelectorExpr)(nil))},
- reflect.TypeOf(Any{}): allTypes,
- reflect.TypeOf(RangeStmt{}): {reflect.TypeOf((*ast.RangeStmt)(nil))},
- reflect.TypeOf(AssignStmt{}): {reflect.TypeOf((*ast.AssignStmt)(nil))},
- reflect.TypeOf(IndexExpr{}): {reflect.TypeOf((*ast.IndexExpr)(nil))},
- reflect.TypeOf(Ident{}): {reflect.TypeOf((*ast.Ident)(nil))},
- reflect.TypeOf(ValueSpec{}): {reflect.TypeOf((*ast.ValueSpec)(nil))},
- reflect.TypeOf(GenDecl{}): {reflect.TypeOf((*ast.GenDecl)(nil))},
- reflect.TypeOf(BinaryExpr{}): {reflect.TypeOf((*ast.BinaryExpr)(nil))},
- reflect.TypeOf(ForStmt{}): {reflect.TypeOf((*ast.ForStmt)(nil))},
- reflect.TypeOf(ArrayType{}): {reflect.TypeOf((*ast.ArrayType)(nil))},
- reflect.TypeOf(DeferStmt{}): {reflect.TypeOf((*ast.DeferStmt)(nil))},
- reflect.TypeOf(MapType{}): {reflect.TypeOf((*ast.MapType)(nil))},
- reflect.TypeOf(ReturnStmt{}): {reflect.TypeOf((*ast.ReturnStmt)(nil))},
- reflect.TypeOf(SliceExpr{}): {reflect.TypeOf((*ast.SliceExpr)(nil))},
- reflect.TypeOf(StarExpr{}): {reflect.TypeOf((*ast.StarExpr)(nil))},
- reflect.TypeOf(UnaryExpr{}): {reflect.TypeOf((*ast.UnaryExpr)(nil))},
- reflect.TypeOf(SendStmt{}): {reflect.TypeOf((*ast.SendStmt)(nil))},
- reflect.TypeOf(SelectStmt{}): {reflect.TypeOf((*ast.SelectStmt)(nil))},
- reflect.TypeOf(ImportSpec{}): {reflect.TypeOf((*ast.ImportSpec)(nil))},
- reflect.TypeOf(IfStmt{}): {reflect.TypeOf((*ast.IfStmt)(nil))},
- reflect.TypeOf(GoStmt{}): {reflect.TypeOf((*ast.GoStmt)(nil))},
- reflect.TypeOf(Field{}): {reflect.TypeOf((*ast.Field)(nil))},
- reflect.TypeOf(SelectorExpr{}): {reflect.TypeOf((*ast.SelectorExpr)(nil))},
- reflect.TypeOf(StructType{}): {reflect.TypeOf((*ast.StructType)(nil))},
- reflect.TypeOf(KeyValueExpr{}): {reflect.TypeOf((*ast.KeyValueExpr)(nil))},
- reflect.TypeOf(FuncType{}): {reflect.TypeOf((*ast.FuncType)(nil))},
- reflect.TypeOf(FuncLit{}): {reflect.TypeOf((*ast.FuncLit)(nil))},
- reflect.TypeOf(FuncDecl{}): {reflect.TypeOf((*ast.FuncDecl)(nil))},
- reflect.TypeOf(ChanType{}): {reflect.TypeOf((*ast.ChanType)(nil))},
- reflect.TypeOf(CallExpr{}): {reflect.TypeOf((*ast.CallExpr)(nil))},
- reflect.TypeOf(CaseClause{}): {reflect.TypeOf((*ast.CaseClause)(nil))},
- reflect.TypeOf(CommClause{}): {reflect.TypeOf((*ast.CommClause)(nil))},
- reflect.TypeOf(CompositeLit{}): {reflect.TypeOf((*ast.CompositeLit)(nil))},
- reflect.TypeOf(EmptyStmt{}): {reflect.TypeOf((*ast.EmptyStmt)(nil))},
- reflect.TypeOf(SwitchStmt{}): {reflect.TypeOf((*ast.SwitchStmt)(nil))},
- reflect.TypeOf(TypeSwitchStmt{}): {reflect.TypeOf((*ast.TypeSwitchStmt)(nil))},
- reflect.TypeOf(TypeAssertExpr{}): {reflect.TypeOf((*ast.TypeAssertExpr)(nil))},
- reflect.TypeOf(TypeSpec{}): {reflect.TypeOf((*ast.TypeSpec)(nil))},
- reflect.TypeOf(InterfaceType{}): {reflect.TypeOf((*ast.InterfaceType)(nil))},
- reflect.TypeOf(BranchStmt{}): {reflect.TypeOf((*ast.BranchStmt)(nil))},
- reflect.TypeOf(IncDecStmt{}): {reflect.TypeOf((*ast.IncDecStmt)(nil))},
- reflect.TypeOf(BasicLit{}): {reflect.TypeOf((*ast.BasicLit)(nil))},
-}
-
-var requiresTypeInfo = map[string]bool{
- "Function": true,
- "Builtin": true,
- "Object": true,
-}
-
-type Parser struct {
- // Allow nodes that rely on type information
- AllowTypeInfo bool
-
- lex *lexer
- cur item
- last *item
- items chan item
-}
-
-func (p *Parser) Parse(s string) (Pattern, error) {
- p.cur = item{}
- p.last = nil
- p.items = nil
-
- fset := token.NewFileSet()
- p.lex = &lexer{
- f: fset.AddFile(" ", -1, len(s)),
- input: s,
- items: make(chan item),
- }
- go p.lex.run()
- p.items = p.lex.items
- root, err := p.node()
- if err != nil {
- // drain lexer if parsing failed
- for range p.lex.items {
- }
- return Pattern{}, err
- }
- if item := <-p.lex.items; item.typ != itemEOF {
- return Pattern{}, fmt.Errorf("unexpected token %s after end of pattern", item.typ)
- }
- return Pattern{
- Root: root,
- Relevant: roots(root),
- }, nil
-}
-
-func (p *Parser) next() item {
- if p.last != nil {
- n := *p.last
- p.last = nil
- return n
- }
- var ok bool
- p.cur, ok = <-p.items
- if !ok {
- p.cur = item{typ: eof}
- }
- return p.cur
-}
-
-func (p *Parser) rewind() {
- p.last = &p.cur
-}
-
-func (p *Parser) peek() item {
- n := p.next()
- p.rewind()
- return n
-}
-
-func (p *Parser) accept(typ itemType) (item, bool) {
- n := p.next()
- if n.typ == typ {
- return n, true
- }
- p.rewind()
- return item{}, false
-}
-
-func (p *Parser) unexpectedToken(valid string) error {
- if p.cur.typ == itemError {
- return fmt.Errorf("error lexing input: %s", p.cur.val)
- }
- var got string
- switch p.cur.typ {
- case itemTypeName, itemVariable, itemString:
- got = p.cur.val
- default:
- got = "'" + p.cur.typ.String() + "'"
- }
-
- pos := p.lex.f.Position(token.Pos(p.cur.pos))
- return fmt.Errorf("%s: expected %s, found %s", pos, valid, got)
-}
-
-func (p *Parser) node() (Node, error) {
- if _, ok := p.accept(itemLeftParen); !ok {
- return nil, p.unexpectedToken("'('")
- }
- typ, ok := p.accept(itemTypeName)
- if !ok {
- return nil, p.unexpectedToken("Node type")
- }
-
- var objs []Node
- for {
- if _, ok := p.accept(itemRightParen); ok {
- break
- } else {
- p.rewind()
- obj, err := p.object()
- if err != nil {
- return nil, err
- }
- objs = append(objs, obj)
- }
- }
-
- return p.populateNode(typ.val, objs)
-}
-
-func populateNode(typ string, objs []Node, allowTypeInfo bool) (Node, error) {
- T, ok := structNodes[typ]
- if !ok {
- return nil, fmt.Errorf("unknown node %s", typ)
- }
-
- if !allowTypeInfo && requiresTypeInfo[typ] {
- return nil, fmt.Errorf("Node %s requires type information", typ)
- }
-
- pv := reflect.New(T)
- v := pv.Elem()
-
- if v.NumField() == 1 {
- f := v.Field(0)
- if f.Type().Kind() == reflect.Slice {
- // Variadic node
- f.Set(reflect.AppendSlice(f, reflect.ValueOf(objs)))
- return v.Interface().(Node), nil
- }
- }
- if len(objs) != v.NumField() {
- return nil, fmt.Errorf("tried to initialize node %s with %d values, expected %d", typ, len(objs), v.NumField())
- }
- for i := 0; i < v.NumField(); i++ {
- f := v.Field(i)
- if f.Kind() == reflect.String {
- if obj, ok := objs[i].(String); ok {
- f.Set(reflect.ValueOf(string(obj)))
- } else {
- return nil, fmt.Errorf("first argument of (Binding name node) must be string, but got %s", objs[i])
- }
- } else {
- f.Set(reflect.ValueOf(objs[i]))
- }
- }
- return v.Interface().(Node), nil
-}
-
-func (p *Parser) populateNode(typ string, objs []Node) (Node, error) {
- return populateNode(typ, objs, p.AllowTypeInfo)
-}
-
-var structNodes = map[string]reflect.Type{
- "Any": reflect.TypeOf(Any{}),
- "Ellipsis": reflect.TypeOf(Ellipsis{}),
- "List": reflect.TypeOf(List{}),
- "Binding": reflect.TypeOf(Binding{}),
- "RangeStmt": reflect.TypeOf(RangeStmt{}),
- "AssignStmt": reflect.TypeOf(AssignStmt{}),
- "IndexExpr": reflect.TypeOf(IndexExpr{}),
- "Ident": reflect.TypeOf(Ident{}),
- "Builtin": reflect.TypeOf(Builtin{}),
- "ValueSpec": reflect.TypeOf(ValueSpec{}),
- "GenDecl": reflect.TypeOf(GenDecl{}),
- "BinaryExpr": reflect.TypeOf(BinaryExpr{}),
- "ForStmt": reflect.TypeOf(ForStmt{}),
- "ArrayType": reflect.TypeOf(ArrayType{}),
- "DeferStmt": reflect.TypeOf(DeferStmt{}),
- "MapType": reflect.TypeOf(MapType{}),
- "ReturnStmt": reflect.TypeOf(ReturnStmt{}),
- "SliceExpr": reflect.TypeOf(SliceExpr{}),
- "StarExpr": reflect.TypeOf(StarExpr{}),
- "UnaryExpr": reflect.TypeOf(UnaryExpr{}),
- "SendStmt": reflect.TypeOf(SendStmt{}),
- "SelectStmt": reflect.TypeOf(SelectStmt{}),
- "ImportSpec": reflect.TypeOf(ImportSpec{}),
- "IfStmt": reflect.TypeOf(IfStmt{}),
- "GoStmt": reflect.TypeOf(GoStmt{}),
- "Field": reflect.TypeOf(Field{}),
- "SelectorExpr": reflect.TypeOf(SelectorExpr{}),
- "StructType": reflect.TypeOf(StructType{}),
- "KeyValueExpr": reflect.TypeOf(KeyValueExpr{}),
- "FuncType": reflect.TypeOf(FuncType{}),
- "FuncLit": reflect.TypeOf(FuncLit{}),
- "FuncDecl": reflect.TypeOf(FuncDecl{}),
- "ChanType": reflect.TypeOf(ChanType{}),
- "CallExpr": reflect.TypeOf(CallExpr{}),
- "CaseClause": reflect.TypeOf(CaseClause{}),
- "CommClause": reflect.TypeOf(CommClause{}),
- "CompositeLit": reflect.TypeOf(CompositeLit{}),
- "EmptyStmt": reflect.TypeOf(EmptyStmt{}),
- "SwitchStmt": reflect.TypeOf(SwitchStmt{}),
- "TypeSwitchStmt": reflect.TypeOf(TypeSwitchStmt{}),
- "TypeAssertExpr": reflect.TypeOf(TypeAssertExpr{}),
- "TypeSpec": reflect.TypeOf(TypeSpec{}),
- "InterfaceType": reflect.TypeOf(InterfaceType{}),
- "BranchStmt": reflect.TypeOf(BranchStmt{}),
- "IncDecStmt": reflect.TypeOf(IncDecStmt{}),
- "BasicLit": reflect.TypeOf(BasicLit{}),
- "Object": reflect.TypeOf(Object{}),
- "Function": reflect.TypeOf(Function{}),
- "Or": reflect.TypeOf(Or{}),
- "Not": reflect.TypeOf(Not{}),
-}
-
-func (p *Parser) object() (Node, error) {
- n := p.next()
- switch n.typ {
- case itemLeftParen:
- p.rewind()
- node, err := p.node()
- if err != nil {
- return node, err
- }
- if p.peek().typ == itemColon {
- p.next()
- tail, err := p.object()
- if err != nil {
- return node, err
- }
- return List{Head: node, Tail: tail}, nil
- }
- return node, nil
- case itemLeftBracket:
- p.rewind()
- return p.array()
- case itemVariable:
- v := n
- if v.val == "nil" {
- return Nil{}, nil
- }
- var b Binding
- if _, ok := p.accept(itemAt); ok {
- o, err := p.node()
- if err != nil {
- return nil, err
- }
- b = Binding{
- Name: v.val,
- Node: o,
- }
- } else {
- p.rewind()
- b = Binding{Name: v.val}
- }
- if p.peek().typ == itemColon {
- p.next()
- tail, err := p.object()
- if err != nil {
- return b, err
- }
- return List{Head: b, Tail: tail}, nil
- }
- return b, nil
- case itemBlank:
- return Any{}, nil
- case itemString:
- return String(n.val), nil
- default:
- return nil, p.unexpectedToken("object")
- }
-}
-
-func (p *Parser) array() (Node, error) {
- if _, ok := p.accept(itemLeftBracket); !ok {
- return nil, p.unexpectedToken("'['")
- }
-
- var objs []Node
- for {
- if _, ok := p.accept(itemRightBracket); ok {
- break
- } else {
- p.rewind()
- obj, err := p.object()
- if err != nil {
- return nil, err
- }
- objs = append(objs, obj)
- }
- }
-
- tail := List{}
- for i := len(objs) - 1; i >= 0; i-- {
- l := List{
- Head: objs[i],
- Tail: tail,
- }
- tail = l
- }
- return tail, nil
-}
-
-/*
-Node ::= itemLeftParen itemTypeName Object* itemRightParen
-Object ::= Node | Array | Binding | itemVariable | itemBlank | itemString
-Array := itemLeftBracket Object* itemRightBracket
-Array := Object itemColon Object
-Binding ::= itemVariable itemAt Node
-*/
diff --git a/vendor/honnef.co/go/tools/pattern/pattern.go b/vendor/honnef.co/go/tools/pattern/pattern.go
deleted file mode 100644
index d7460560203..00000000000
--- a/vendor/honnef.co/go/tools/pattern/pattern.go
+++ /dev/null
@@ -1,497 +0,0 @@
-package pattern
-
-import (
- "fmt"
- "go/token"
- "reflect"
- "strings"
-)
-
-var (
- _ Node = Ellipsis{}
- _ Node = Binding{}
- _ Node = RangeStmt{}
- _ Node = AssignStmt{}
- _ Node = IndexExpr{}
- _ Node = Ident{}
- _ Node = Builtin{}
- _ Node = String("")
- _ Node = Any{}
- _ Node = ValueSpec{}
- _ Node = List{}
- _ Node = GenDecl{}
- _ Node = BinaryExpr{}
- _ Node = ForStmt{}
- _ Node = ArrayType{}
- _ Node = DeferStmt{}
- _ Node = MapType{}
- _ Node = ReturnStmt{}
- _ Node = SliceExpr{}
- _ Node = StarExpr{}
- _ Node = UnaryExpr{}
- _ Node = SendStmt{}
- _ Node = SelectStmt{}
- _ Node = ImportSpec{}
- _ Node = IfStmt{}
- _ Node = GoStmt{}
- _ Node = Field{}
- _ Node = SelectorExpr{}
- _ Node = StructType{}
- _ Node = KeyValueExpr{}
- _ Node = FuncType{}
- _ Node = FuncLit{}
- _ Node = FuncDecl{}
- _ Node = Token(0)
- _ Node = ChanType{}
- _ Node = CallExpr{}
- _ Node = CaseClause{}
- _ Node = CommClause{}
- _ Node = CompositeLit{}
- _ Node = EmptyStmt{}
- _ Node = SwitchStmt{}
- _ Node = TypeSwitchStmt{}
- _ Node = TypeAssertExpr{}
- _ Node = TypeSpec{}
- _ Node = InterfaceType{}
- _ Node = BranchStmt{}
- _ Node = IncDecStmt{}
- _ Node = BasicLit{}
- _ Node = Nil{}
- _ Node = Object{}
- _ Node = Function{}
- _ Node = Not{}
- _ Node = Or{}
-)
-
-type Function struct {
- Name Node
-}
-
-type Token token.Token
-
-type Nil struct {
-}
-
-type Ellipsis struct {
- Elt Node
-}
-
-type IncDecStmt struct {
- X Node
- Tok Node
-}
-
-type BranchStmt struct {
- Tok Node
- Label Node
-}
-
-type InterfaceType struct {
- Methods Node
-}
-
-type TypeSpec struct {
- Name Node
- Type Node
-}
-
-type TypeAssertExpr struct {
- X Node
- Type Node
-}
-
-type TypeSwitchStmt struct {
- Init Node
- Assign Node
- Body Node
-}
-
-type SwitchStmt struct {
- Init Node
- Tag Node
- Body Node
-}
-
-type EmptyStmt struct {
-}
-
-type CompositeLit struct {
- Type Node
- Elts Node
-}
-
-type CommClause struct {
- Comm Node
- Body Node
-}
-
-type CaseClause struct {
- List Node
- Body Node
-}
-
-type CallExpr struct {
- Fun Node
- Args Node
- // XXX handle ellipsis
-}
-
-// TODO(dh): add a ChanDir node, and a way of instantiating it.
-
-type ChanType struct {
- Dir Node
- Value Node
-}
-
-type FuncDecl struct {
- Recv Node
- Name Node
- Type Node
- Body Node
-}
-
-type FuncLit struct {
- Type Node
- Body Node
-}
-
-type FuncType struct {
- Params Node
- Results Node
-}
-
-type KeyValueExpr struct {
- Key Node
- Value Node
-}
-
-type StructType struct {
- Fields Node
-}
-
-type SelectorExpr struct {
- X Node
- Sel Node
-}
-
-type Field struct {
- Names Node
- Type Node
- Tag Node
-}
-
-type GoStmt struct {
- Call Node
-}
-
-type IfStmt struct {
- Init Node
- Cond Node
- Body Node
- Else Node
-}
-
-type ImportSpec struct {
- Name Node
- Path Node
-}
-
-type SelectStmt struct {
- Body Node
-}
-
-type ArrayType struct {
- Len Node
- Elt Node
-}
-
-type DeferStmt struct {
- Call Node
-}
-
-type MapType struct {
- Key Node
- Value Node
-}
-
-type ReturnStmt struct {
- Results Node
-}
-
-type SliceExpr struct {
- X Node
- Low Node
- High Node
- Max Node
-}
-
-type StarExpr struct {
- X Node
-}
-
-type UnaryExpr struct {
- Op Node
- X Node
-}
-
-type SendStmt struct {
- Chan Node
- Value Node
-}
-
-type Binding struct {
- Name string
- Node Node
-}
-
-type RangeStmt struct {
- Key Node
- Value Node
- Tok Node
- X Node
- Body Node
-}
-
-type AssignStmt struct {
- Lhs Node
- Tok Node
- Rhs Node
-}
-
-type IndexExpr struct {
- X Node
- Index Node
-}
-
-type Node interface {
- String() string
- isNode()
-}
-
-type Ident struct {
- Name Node
-}
-
-type Object struct {
- Name Node
-}
-
-type Builtin struct {
- Name Node
-}
-
-type String string
-
-type Any struct{}
-
-type ValueSpec struct {
- Names Node
- Type Node
- Values Node
-}
-
-type List struct {
- Head Node
- Tail Node
-}
-
-type GenDecl struct {
- Tok Node
- Specs Node
-}
-
-type BasicLit struct {
- Kind Node
- Value Node
-}
-
-type BinaryExpr struct {
- X Node
- Op Node
- Y Node
-}
-
-type ForStmt struct {
- Init Node
- Cond Node
- Post Node
- Body Node
-}
-
-type Or struct {
- Nodes []Node
-}
-
-type Not struct {
- Node Node
-}
-
-func stringify(n Node) string {
- v := reflect.ValueOf(n)
- var parts []string
- parts = append(parts, v.Type().Name())
- for i := 0; i < v.NumField(); i++ {
- //lint:ignore S1025 false positive in staticcheck 2019.2.3
- parts = append(parts, fmt.Sprintf("%s", v.Field(i)))
- }
- return "(" + strings.Join(parts, " ") + ")"
-}
-
-func (stmt AssignStmt) String() string { return stringify(stmt) }
-func (expr IndexExpr) String() string { return stringify(expr) }
-func (id Ident) String() string { return stringify(id) }
-func (spec ValueSpec) String() string { return stringify(spec) }
-func (decl GenDecl) String() string { return stringify(decl) }
-func (lit BasicLit) String() string { return stringify(lit) }
-func (expr BinaryExpr) String() string { return stringify(expr) }
-func (stmt ForStmt) String() string { return stringify(stmt) }
-func (stmt RangeStmt) String() string { return stringify(stmt) }
-func (typ ArrayType) String() string { return stringify(typ) }
-func (stmt DeferStmt) String() string { return stringify(stmt) }
-func (typ MapType) String() string { return stringify(typ) }
-func (stmt ReturnStmt) String() string { return stringify(stmt) }
-func (expr SliceExpr) String() string { return stringify(expr) }
-func (expr StarExpr) String() string { return stringify(expr) }
-func (expr UnaryExpr) String() string { return stringify(expr) }
-func (stmt SendStmt) String() string { return stringify(stmt) }
-func (spec ImportSpec) String() string { return stringify(spec) }
-func (stmt SelectStmt) String() string { return stringify(stmt) }
-func (stmt IfStmt) String() string { return stringify(stmt) }
-func (stmt IncDecStmt) String() string { return stringify(stmt) }
-func (stmt GoStmt) String() string { return stringify(stmt) }
-func (field Field) String() string { return stringify(field) }
-func (expr SelectorExpr) String() string { return stringify(expr) }
-func (typ StructType) String() string { return stringify(typ) }
-func (expr KeyValueExpr) String() string { return stringify(expr) }
-func (typ FuncType) String() string { return stringify(typ) }
-func (lit FuncLit) String() string { return stringify(lit) }
-func (decl FuncDecl) String() string { return stringify(decl) }
-func (stmt BranchStmt) String() string { return stringify(stmt) }
-func (expr CallExpr) String() string { return stringify(expr) }
-func (clause CaseClause) String() string { return stringify(clause) }
-func (typ ChanType) String() string { return stringify(typ) }
-func (clause CommClause) String() string { return stringify(clause) }
-func (lit CompositeLit) String() string { return stringify(lit) }
-func (stmt EmptyStmt) String() string { return stringify(stmt) }
-func (typ InterfaceType) String() string { return stringify(typ) }
-func (stmt SwitchStmt) String() string { return stringify(stmt) }
-func (expr TypeAssertExpr) String() string { return stringify(expr) }
-func (spec TypeSpec) String() string { return stringify(spec) }
-func (stmt TypeSwitchStmt) String() string { return stringify(stmt) }
-func (nil Nil) String() string { return "nil" }
-func (builtin Builtin) String() string { return stringify(builtin) }
-func (obj Object) String() string { return stringify(obj) }
-func (fn Function) String() string { return stringify(fn) }
-func (el Ellipsis) String() string { return stringify(el) }
-func (not Not) String() string { return stringify(not) }
-
-func (or Or) String() string {
- s := "(Or"
- for _, node := range or.Nodes {
- s += " "
- s += node.String()
- }
- s += ")"
- return s
-}
-
-func isProperList(l List) bool {
- if l.Head == nil && l.Tail == nil {
- return true
- }
- switch tail := l.Tail.(type) {
- case nil:
- return false
- case List:
- return isProperList(tail)
- default:
- return false
- }
-}
-
-func (l List) String() string {
- if l.Head == nil && l.Tail == nil {
- return "[]"
- }
-
- if isProperList(l) {
- // pretty-print the list
- var objs []string
- for l.Head != nil {
- objs = append(objs, l.Head.String())
- l = l.Tail.(List)
- }
- return fmt.Sprintf("[%s]", strings.Join(objs, " "))
- }
-
- return fmt.Sprintf("%s:%s", l.Head, l.Tail)
-}
-
-func (bind Binding) String() string {
- if bind.Node == nil {
- return bind.Name
- }
- return fmt.Sprintf("%s@%s", bind.Name, bind.Node)
-}
-
-func (s String) String() string { return fmt.Sprintf("%q", string(s)) }
-
-func (tok Token) String() string {
- return fmt.Sprintf("%q", strings.ToUpper(token.Token(tok).String()))
-}
-
-func (Any) String() string { return "_" }
-
-func (AssignStmt) isNode() {}
-func (IndexExpr) isNode() {}
-func (Ident) isNode() {}
-func (ValueSpec) isNode() {}
-func (GenDecl) isNode() {}
-func (BasicLit) isNode() {}
-func (BinaryExpr) isNode() {}
-func (ForStmt) isNode() {}
-func (RangeStmt) isNode() {}
-func (ArrayType) isNode() {}
-func (DeferStmt) isNode() {}
-func (MapType) isNode() {}
-func (ReturnStmt) isNode() {}
-func (SliceExpr) isNode() {}
-func (StarExpr) isNode() {}
-func (UnaryExpr) isNode() {}
-func (SendStmt) isNode() {}
-func (ImportSpec) isNode() {}
-func (SelectStmt) isNode() {}
-func (IfStmt) isNode() {}
-func (IncDecStmt) isNode() {}
-func (GoStmt) isNode() {}
-func (Field) isNode() {}
-func (SelectorExpr) isNode() {}
-func (StructType) isNode() {}
-func (KeyValueExpr) isNode() {}
-func (FuncType) isNode() {}
-func (FuncLit) isNode() {}
-func (FuncDecl) isNode() {}
-func (BranchStmt) isNode() {}
-func (CallExpr) isNode() {}
-func (CaseClause) isNode() {}
-func (ChanType) isNode() {}
-func (CommClause) isNode() {}
-func (CompositeLit) isNode() {}
-func (EmptyStmt) isNode() {}
-func (InterfaceType) isNode() {}
-func (SwitchStmt) isNode() {}
-func (TypeAssertExpr) isNode() {}
-func (TypeSpec) isNode() {}
-func (TypeSwitchStmt) isNode() {}
-func (Nil) isNode() {}
-func (Builtin) isNode() {}
-func (Object) isNode() {}
-func (Function) isNode() {}
-func (Ellipsis) isNode() {}
-func (Or) isNode() {}
-func (List) isNode() {}
-func (String) isNode() {}
-func (Token) isNode() {}
-func (Any) isNode() {}
-func (Binding) isNode() {}
-func (Not) isNode() {}
diff --git a/vendor/honnef.co/go/tools/printf/fuzz.go b/vendor/honnef.co/go/tools/printf/fuzz.go
deleted file mode 100644
index 8ebf357fb42..00000000000
--- a/vendor/honnef.co/go/tools/printf/fuzz.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build gofuzz
-
-package printf
-
-func Fuzz(data []byte) int {
- _, err := Parse(string(data))
- if err == nil {
- return 1
- }
- return 0
-}
diff --git a/vendor/honnef.co/go/tools/printf/printf.go b/vendor/honnef.co/go/tools/printf/printf.go
deleted file mode 100644
index 754db9b16d8..00000000000
--- a/vendor/honnef.co/go/tools/printf/printf.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Package printf implements a parser for fmt.Printf-style format
-// strings.
-//
-// It parses verbs according to the following syntax:
-// Numeric -> '0'-'9'
-// Letter -> 'a'-'z' | 'A'-'Z'
-// Index -> '[' Numeric+ ']'
-// Star -> '*'
-// Star -> Index '*'
-//
-// Precision -> Numeric+ | Star
-// Width -> Numeric+ | Star
-//
-// WidthAndPrecision -> Width '.' Precision
-// WidthAndPrecision -> Width '.'
-// WidthAndPrecision -> Width
-// WidthAndPrecision -> '.' Precision
-// WidthAndPrecision -> '.'
-//
-// Flag -> '+' | '-' | '#' | ' ' | '0'
-// Verb -> Letter | '%'
-//
-// Input -> '%' [ Flag+ ] [ WidthAndPrecision ] [ Index ] Verb
-package printf
-
-import (
- "errors"
- "regexp"
- "strconv"
- "strings"
-)
-
-// ErrInvalid is returned for invalid format strings or verbs.
-var ErrInvalid = errors.New("invalid format string")
-
-type Verb struct {
- Letter rune
- Flags string
-
- Width Argument
- Precision Argument
- // Which value in the argument list the verb uses.
- // -1 denotes the next argument,
- // values > 0 denote explicit arguments.
- // The value 0 denotes that no argument is consumed. This is the case for %%.
- Value int
-
- Raw string
-}
-
-// Argument is an implicit or explicit width or precision.
-type Argument interface {
- isArgument()
-}
-
-// The Default value, when no width or precision is provided.
-type Default struct{}
-
-// Zero is the implicit zero value.
-// This value may only appear for precisions in format strings like %6.f
-type Zero struct{}
-
-// Star is a * value, which may either refer to the next argument (Index == -1) or an explicit argument.
-type Star struct{ Index int }
-
-// A Literal value, such as 6 in %6d.
-type Literal int
-
-func (Default) isArgument() {}
-func (Zero) isArgument() {}
-func (Star) isArgument() {}
-func (Literal) isArgument() {}
-
-// Parse parses f and returns a list of actions.
-// An action may either be a literal string, or a Verb.
-func Parse(f string) ([]interface{}, error) {
- var out []interface{}
- for len(f) > 0 {
- if f[0] == '%' {
- v, n, err := ParseVerb(f)
- if err != nil {
- return nil, err
- }
- f = f[n:]
- out = append(out, v)
- } else {
- n := strings.IndexByte(f, '%')
- if n > -1 {
- out = append(out, f[:n])
- f = f[n:]
- } else {
- out = append(out, f)
- f = ""
- }
- }
- }
-
- return out, nil
-}
-
-func atoi(s string) int {
- n, _ := strconv.Atoi(s)
- return n
-}
-
-// ParseVerb parses the verb at the beginning of f.
-// It returns the verb, how much of the input was consumed, and an error, if any.
-func ParseVerb(f string) (Verb, int, error) {
- if len(f) < 2 {
- return Verb{}, 0, ErrInvalid
- }
- const (
- flags = 1
-
- width = 2
- widthStar = 3
- widthIndex = 5
-
- dot = 6
- prec = 7
- precStar = 8
- precIndex = 10
-
- verbIndex = 11
- verb = 12
- )
-
- m := re.FindStringSubmatch(f)
- if m == nil {
- return Verb{}, 0, ErrInvalid
- }
-
- v := Verb{
- Letter: []rune(m[verb])[0],
- Flags: m[flags],
- Raw: m[0],
- }
-
- if m[width] != "" {
- // Literal width
- v.Width = Literal(atoi(m[width]))
- } else if m[widthStar] != "" {
- // Star width
- if m[widthIndex] != "" {
- v.Width = Star{atoi(m[widthIndex])}
- } else {
- v.Width = Star{-1}
- }
- } else {
- // Default width
- v.Width = Default{}
- }
-
- if m[dot] == "" {
- // default precision
- v.Precision = Default{}
- } else {
- if m[prec] != "" {
- // Literal precision
- v.Precision = Literal(atoi(m[prec]))
- } else if m[precStar] != "" {
- // Star precision
- if m[precIndex] != "" {
- v.Precision = Star{atoi(m[precIndex])}
- } else {
- v.Precision = Star{-1}
- }
- } else {
- // Zero precision
- v.Precision = Zero{}
- }
- }
-
- if m[verb] == "%" {
- v.Value = 0
- } else if m[verbIndex] != "" {
- v.Value = atoi(m[verbIndex])
- } else {
- v.Value = -1
- }
-
- return v, len(m[0]), nil
-}
-
-const (
- flags = `([+#0 -]*)`
- verb = `([a-zA-Z%])`
- index = `(?:\[([0-9]+)\])`
- star = `((` + index + `)?\*)`
- width1 = `([0-9]+)`
- width2 = star
- width = `(?:` + width1 + `|` + width2 + `)`
- precision = width
- widthAndPrecision = `(?:(?:` + width + `)?(?:(\.)(?:` + precision + `)?)?)`
-)
-
-var re = regexp.MustCompile(`^%` + flags + widthAndPrecision + `?` + index + `?` + verb)
diff --git a/vendor/honnef.co/go/tools/report/report.go b/vendor/honnef.co/go/tools/report/report.go
deleted file mode 100644
index 9b8b6ee74ff..00000000000
--- a/vendor/honnef.co/go/tools/report/report.go
+++ /dev/null
@@ -1,184 +0,0 @@
-package report
-
-import (
- "bytes"
- "go/ast"
- "go/printer"
- "go/token"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/ast/astutil"
- "honnef.co/go/tools/facts"
- "honnef.co/go/tools/lint"
-)
-
-type Options struct {
- ShortRange bool
- FilterGenerated bool
- Fixes []analysis.SuggestedFix
- Related []analysis.RelatedInformation
-}
-
-type Option func(*Options)
-
-func ShortRange() Option {
- return func(opts *Options) {
- opts.ShortRange = true
- }
-}
-
-func FilterGenerated() Option {
- return func(opts *Options) {
- opts.FilterGenerated = true
- }
-}
-
-func Fixes(fixes ...analysis.SuggestedFix) Option {
- return func(opts *Options) {
- opts.Fixes = append(opts.Fixes, fixes...)
- }
-}
-
-func Related(node Positioner, message string) Option {
- return func(opts *Options) {
- pos, end := getRange(node, opts.ShortRange)
- r := analysis.RelatedInformation{
- Pos: pos,
- End: end,
- Message: message,
- }
- opts.Related = append(opts.Related, r)
- }
-}
-
-type Positioner interface {
- Pos() token.Pos
-}
-
-type fullPositioner interface {
- Pos() token.Pos
- End() token.Pos
-}
-
-type sourcer interface {
- Source() ast.Node
-}
-
-// shortRange returns the position and end of the main component of an
-// AST node. For nodes that have no body, the short range is identical
-// to the node's Pos and End. For nodes that do have a body, the short
-// range excludes the body.
-func shortRange(node ast.Node) (pos, end token.Pos) {
- switch node := node.(type) {
- case *ast.File:
- return node.Pos(), node.Name.End()
- case *ast.CaseClause:
- return node.Pos(), node.Colon + 1
- case *ast.CommClause:
- return node.Pos(), node.Colon + 1
- case *ast.DeferStmt:
- return node.Pos(), node.Defer + token.Pos(len("defer"))
- case *ast.ExprStmt:
- return shortRange(node.X)
- case *ast.ForStmt:
- if node.Post != nil {
- return node.For, node.Post.End()
- } else if node.Cond != nil {
- return node.For, node.Cond.End()
- } else if node.Init != nil {
- // +1 to catch the semicolon, for gofmt'ed code
- return node.Pos(), node.Init.End() + 1
- } else {
- return node.Pos(), node.For + token.Pos(len("for"))
- }
- case *ast.FuncDecl:
- return node.Pos(), node.Type.End()
- case *ast.FuncLit:
- return node.Pos(), node.Type.End()
- case *ast.GoStmt:
- if _, ok := astutil.Unparen(node.Call.Fun).(*ast.FuncLit); ok {
- return node.Pos(), node.Go + token.Pos(len("go"))
- } else {
- return node.Pos(), node.End()
- }
- case *ast.IfStmt:
- return node.Pos(), node.Cond.End()
- case *ast.RangeStmt:
- return node.Pos(), node.X.End()
- case *ast.SelectStmt:
- return node.Pos(), node.Pos() + token.Pos(len("select"))
- case *ast.SwitchStmt:
- if node.Tag != nil {
- return node.Pos(), node.Tag.End()
- } else if node.Init != nil {
- // +1 to catch the semicolon, for gofmt'ed code
- return node.Pos(), node.Init.End() + 1
- } else {
- return node.Pos(), node.Pos() + token.Pos(len("switch"))
- }
- case *ast.TypeSwitchStmt:
- return node.Pos(), node.Assign.End()
- default:
- return node.Pos(), node.End()
- }
-}
-
-func getRange(node Positioner, short bool) (pos, end token.Pos) {
- switch node := node.(type) {
- case sourcer:
- s := node.Source()
- if short {
- return shortRange(s)
- }
- return s.Pos(), s.End()
- case fullPositioner:
- if short {
- return shortRange(node)
- }
- return node.Pos(), node.End()
- default:
- return node.Pos(), token.NoPos
- }
-}
-
-func Report(pass *analysis.Pass, node Positioner, message string, opts ...Option) {
- cfg := &Options{}
- for _, opt := range opts {
- opt(cfg)
- }
-
- file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename
- if cfg.FilterGenerated {
- m := pass.ResultOf[facts.Generated].(map[string]facts.Generator)
- if _, ok := m[file]; ok {
- return
- }
- }
-
- pos, end := getRange(node, cfg.ShortRange)
- d := analysis.Diagnostic{
- Pos: pos,
- End: end,
- Message: message,
- SuggestedFixes: cfg.Fixes,
- Related: cfg.Related,
- }
- pass.Report(d)
-}
-
-func Render(pass *analysis.Pass, x interface{}) string {
- var buf bytes.Buffer
- if err := printer.Fprint(&buf, pass.Fset, x); err != nil {
- panic(err)
- }
- return buf.String()
-}
-
-func RenderArgs(pass *analysis.Pass, args []ast.Expr) string {
- var ss []string
- for _, arg := range args {
- ss = append(ss, Render(pass, arg))
- }
- return strings.Join(ss, ", ")
-}
diff --git a/vendor/honnef.co/go/tools/simple/analysis.go b/vendor/honnef.co/go/tools/simple/analysis.go
deleted file mode 100644
index 9f554c310a1..00000000000
--- a/vendor/honnef.co/go/tools/simple/analysis.go
+++ /dev/null
@@ -1,148 +0,0 @@
-package simple
-
-import (
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "honnef.co/go/tools/facts"
- "honnef.co/go/tools/internal/passes/buildir"
- "honnef.co/go/tools/lint/lintutil"
-)
-
-var Analyzers = lintutil.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer{
- "S1000": {
- Run: CheckSingleCaseSelect,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1001": {
- Run: CheckLoopCopy,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1002": {
- Run: CheckIfBoolCmp,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1003": {
- Run: CheckStringsContains,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1004": {
- Run: CheckBytesCompare,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1005": {
- Run: CheckUnnecessaryBlank,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1006": {
- Run: CheckForTrue,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1007": {
- Run: CheckRegexpRaw,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1008": {
- Run: CheckIfReturn,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1009": {
- Run: CheckRedundantNilCheckWithLen,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1010": {
- Run: CheckSlicing,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1011": {
- Run: CheckLoopAppend,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1012": {
- Run: CheckTimeSince,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1016": {
- Run: CheckSimplerStructConversion,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1017": {
- Run: CheckTrim,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1018": {
- Run: CheckLoopSlide,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1019": {
- Run: CheckMakeLenCap,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1020": {
- Run: CheckAssertNotNil,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1021": {
- Run: CheckDeclareAssign,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1023": {
- Run: CheckRedundantBreak,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1024": {
- Run: CheckTimeUntil,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1025": {
- Run: CheckRedundantSprintf,
- Requires: []*analysis.Analyzer{buildir.Analyzer, inspect.Analyzer, facts.Generated},
- },
- "S1028": {
- Run: CheckErrorsNewSprintf,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1029": {
- Run: CheckRangeStringRunes,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "S1030": {
- Run: CheckBytesBufferConversions,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1031": {
- Run: CheckNilCheckAroundRange,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1032": {
- Run: CheckSortHelpers,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1033": {
- Run: CheckGuardedDelete,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1034": {
- Run: CheckSimplifyTypeSwitch,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1035": {
- Run: CheckRedundantCanonicalHeaderKey,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1036": {
- Run: CheckUnnecessaryGuard,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "S1037": {
- Run: CheckElaborateSleep,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1038": {
- Run: CheckPrintSprintf,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
- "S1039": {
- Run: CheckSprintLiteral,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated},
- },
-})
diff --git a/vendor/honnef.co/go/tools/simple/doc.go b/vendor/honnef.co/go/tools/simple/doc.go
deleted file mode 100644
index 27297bf6147..00000000000
--- a/vendor/honnef.co/go/tools/simple/doc.go
+++ /dev/null
@@ -1,485 +0,0 @@
-package simple
-
-import "honnef.co/go/tools/lint"
-
-var Docs = map[string]*lint.Documentation{
- "S1000": {
- Title: `Use plain channel send or receive instead of single-case select`,
- Text: `Select statements with a single case can be replaced with a simple
-send or receive.
-
-Before:
-
- select {
- case x := <-ch:
- fmt.Println(x)
- }
-
-After:
-
- x := <-ch
- fmt.Println(x)`,
- Since: "2017.1",
- },
-
- "S1001": {
- Title: `Replace for loop with call to copy`,
- Text: `Use copy() for copying elements from one slice to another.
-
-Before:
-
- for i, x := range src {
- dst[i] = x
- }
-
-After:
-
- copy(dst, src)`,
- Since: "2017.1",
- },
-
- "S1002": {
- Title: `Omit comparison with boolean constant`,
- Text: `Before:
-
- if x == true {}
-
-After:
-
- if x {}`,
- Since: "2017.1",
- },
-
- "S1003": {
- Title: `Replace call to strings.Index with strings.Contains`,
- Text: `Before:
-
- if strings.Index(x, y) != -1 {}
-
-After:
-
- if strings.Contains(x, y) {}`,
- Since: "2017.1",
- },
-
- "S1004": {
- Title: `Replace call to bytes.Compare with bytes.Equal`,
- Text: `Before:
-
- if bytes.Compare(x, y) == 0 {}
-
-After:
-
- if bytes.Equal(x, y) {}`,
- Since: "2017.1",
- },
-
- "S1005": {
- Title: `Drop unnecessary use of the blank identifier`,
- Text: `In many cases, assigning to the blank identifier is unnecessary.
-
-Before:
-
- for _ = range s {}
- x, _ = someMap[key]
- _ = <-ch
-
-After:
-
- for range s{}
- x = someMap[key]
- <-ch`,
- Since: "2017.1",
- },
-
- "S1006": {
- Title: `Use for { ... } for infinite loops`,
- Text: `For infinite loops, using for { ... } is the most idiomatic choice.`,
- Since: "2017.1",
- },
-
- "S1007": {
- Title: `Simplify regular expression by using raw string literal`,
- Text: `Raw string literals use ` + "`" + ` instead of " and do not support
-any escape sequences. This means that the backslash (\) can be used
-freely, without the need of escaping.
-
-Since regular expressions have their own escape sequences, raw strings
-can improve their readability.
-
-Before:
-
- regexp.Compile("\\A(\\w+) profile: total \\d+\\n\\z")
-
-After:
-
- regexp.Compile(` + "`" + `\A(\w+) profile: total \d+\n\z` + "`" + `)`,
- Since: "2017.1",
- },
-
- "S1008": {
- Title: `Simplify returning boolean expression`,
- Text: `Before:
-
- if {
- return true
- }
- return false
-
-After:
-
- return `,
- Since: "2017.1",
- },
-
- "S1009": {
- Title: `Omit redundant nil check on slices`,
- Text: `The len function is defined for all slices, even nil ones, which have
-a length of zero. It is not necessary to check if a slice is not nil
-before checking that its length is not zero.
-
-Before:
-
- if x != nil && len(x) != 0 {}
-
-After:
-
- if len(x) != 0 {}`,
- Since: "2017.1",
- },
-
- "S1010": {
- Title: `Omit default slice index`,
- Text: `When slicing, the second index defaults to the length of the value,
-making s[n:len(s)] and s[n:] equivalent.`,
- Since: "2017.1",
- },
-
- "S1011": {
- Title: `Use a single append to concatenate two slices`,
- Text: `Before:
-
- for _, e := range y {
- x = append(x, e)
- }
-
-After:
-
- x = append(x, y...)`,
- Since: "2017.1",
- },
-
- "S1012": {
- Title: `Replace time.Now().Sub(x) with time.Since(x)`,
- Text: `The time.Since helper has the same effect as using time.Now().Sub(x)
-but is easier to read.
-
-Before:
-
- time.Now().Sub(x)
-
-After:
-
- time.Since(x)`,
- Since: "2017.1",
- },
-
- "S1016": {
- Title: `Use a type conversion instead of manually copying struct fields`,
- Text: `Two struct types with identical fields can be converted between each
-other. In older versions of Go, the fields had to have identical
-struct tags. Since Go 1.8, however, struct tags are ignored during
-conversions. It is thus not necessary to manually copy every field
-individually.
-
-Before:
-
- var x T1
- y := T2{
- Field1: x.Field1,
- Field2: x.Field2,
- }
-
-After:
-
- var x T1
- y := T2(x)`,
- Since: "2017.1",
- },
-
- "S1017": {
- Title: `Replace manual trimming with strings.TrimPrefix`,
- Text: `Instead of using strings.HasPrefix and manual slicing, use the
-strings.TrimPrefix function. If the string doesn't start with the
-prefix, the original string will be returned. Using strings.TrimPrefix
-reduces complexity, and avoids common bugs, such as off-by-one
-mistakes.
-
-Before:
-
- if strings.HasPrefix(str, prefix) {
- str = str[len(prefix):]
- }
-
-After:
-
- str = strings.TrimPrefix(str, prefix)`,
- Since: "2017.1",
- },
-
- "S1018": {
- Title: `Use copy for sliding elements`,
- Text: `copy() permits using the same source and destination slice, even with
-overlapping ranges. This makes it ideal for sliding elements in a
-slice.
-
-Before:
-
- for i := 0; i < n; i++ {
- bs[i] = bs[offset+i]
- }
-
-After:
-
- copy(bs[:n], bs[offset:])`,
- Since: "2017.1",
- },
-
- "S1019": {
- Title: `Simplify make call by omitting redundant arguments`,
- Text: `The make function has default values for the length and capacity
-arguments. For channels and maps, the length defaults to zero.
-Additionally, for slices the capacity defaults to the length.`,
- Since: "2017.1",
- },
-
- "S1020": {
- Title: `Omit redundant nil check in type assertion`,
- Text: `Before:
-
- if _, ok := i.(T); ok && i != nil {}
-
-After:
-
- if _, ok := i.(T); ok {}`,
- Since: "2017.1",
- },
-
- "S1021": {
- Title: `Merge variable declaration and assignment`,
- Text: `Before:
-
- var x uint
- x = 1
-
-After:
-
- var x uint = 1`,
- Since: "2017.1",
- },
-
- "S1023": {
- Title: `Omit redundant control flow`,
- Text: `Functions that have no return value do not need a return statement as
-the final statement of the function.
-
-Switches in Go do not have automatic fallthrough, unlike languages
-like C. It is not necessary to have a break statement as the final
-statement in a case block.`,
- Since: "2017.1",
- },
-
- "S1024": {
- Title: `Replace x.Sub(time.Now()) with time.Until(x)`,
- Text: `The time.Until helper has the same effect as using x.Sub(time.Now())
-but is easier to read.
-
-Before:
-
- x.Sub(time.Now())
-
-After:
-
- time.Until(x)`,
- Since: "2017.1",
- },
-
- "S1025": {
- Title: `Don't use fmt.Sprintf("%s", x) unnecessarily`,
- Text: `In many instances, there are easier and more efficient ways of getting
-a value's string representation. Whenever a value's underlying type is
-a string already, or the type has a String method, they should be used
-directly.
-
-Given the following shared definitions
-
- type T1 string
- type T2 int
-
- func (T2) String() string { return "Hello, world" }
-
- var x string
- var y T1
- var z T2
-
-we can simplify the following
-
- fmt.Sprintf("%s", x)
- fmt.Sprintf("%s", y)
- fmt.Sprintf("%s", z)
-
-to
-
- x
- string(y)
- z.String()`,
- Since: "2017.1",
- },
-
- "S1028": {
- Title: `Simplify error construction with fmt.Errorf`,
- Text: `Before:
-
- errors.New(fmt.Sprintf(...))
-
-After:
-
- fmt.Errorf(...)`,
- Since: "2017.1",
- },
-
- "S1029": {
- Title: `Range over the string directly`,
- Text: `Ranging over a string will yield byte offsets and runes. If the offset
-isn't used, this is functionally equivalent to converting the string
-to a slice of runes and ranging over that. Ranging directly over the
-string will be more performant, however, as it avoids allocating a new
-slice, the size of which depends on the length of the string.
-
-Before:
-
- for _, r := range []rune(s) {}
-
-After:
-
- for _, r := range s {}`,
- Since: "2017.1",
- },
-
- "S1030": {
- Title: `Use bytes.Buffer.String or bytes.Buffer.Bytes`,
- Text: `bytes.Buffer has both a String and a Bytes method. It is never
-necessary to use string(buf.Bytes()) or []byte(buf.String()) – simply
-use the other method.`,
- Since: "2017.1",
- },
-
- "S1031": {
- Title: `Omit redundant nil check around loop`,
- Text: `You can use range on nil slices and maps, the loop will simply never
-execute. This makes an additional nil check around the loop
-unnecessary.
-
-Before:
-
- if s != nil {
- for _, x := range s {
- ...
- }
- }
-
-After:
-
- for _, x := range s {
- ...
- }`,
- Since: "2017.1",
- },
-
- "S1032": {
- Title: `Use sort.Ints(x), sort.Float64s(x), and sort.Strings(x)`,
- Text: `The sort.Ints, sort.Float64s and sort.Strings functions are easier to
-read than sort.Sort(sort.IntSlice(x)), sort.Sort(sort.Float64Slice(x))
-and sort.Sort(sort.StringSlice(x)).
-
-Before:
-
- sort.Sort(sort.StringSlice(x))
-
-After:
-
- sort.Strings(x)`,
- Since: "2019.1",
- },
-
- "S1033": {
- Title: `Unnecessary guard around call to delete`,
- Text: `Calling delete on a nil map is a no-op.`,
- Since: "2019.2",
- },
-
- "S1034": {
- Title: `Use result of type assertion to simplify cases`,
- Since: "2019.2",
- },
-
- "S1035": {
- Title: `Redundant call to net/http.CanonicalHeaderKey in method call on net/http.Header`,
- Text: `The methods on net/http.Header, namely Add, Del, Get and Set, already
-canonicalize the given header name.`,
- Since: "2020.1",
- },
-
- "S1036": {
- Title: `Unnecessary guard around map access`,
-
- Text: `When accessing a map key that doesn't exist yet, one
-receives a zero value. Often, the zero value is a suitable value, for example when using append or doing integer math.
-
-The following
-
- if _, ok := m["foo"]; ok {
- m["foo"] = append(m["foo"], "bar")
- } else {
- m["foo"] = []string{"bar"}
- }
-
-can be simplified to
-
- m["foo"] = append(m["foo"], "bar")
-
-and
-
- if _, ok := m2["k"]; ok {
- m2["k"] += 4
- } else {
- m2["k"] = 4
- }
-
-can be simplified to
-
- m["k"] += 4
-`,
- Since: "2020.1",
- },
-
- "S1037": {
- Title: `Elaborate way of sleeping`,
- Text: `Using a select statement with a single case receiving
-from the result of time.After is a very elaborate way of sleeping that
-can much simpler be expressed with a simple call to time.Sleep.`,
- Since: "2020.1",
- },
-
- "S1038": {
- Title: "Unnecessarily complex way of printing formatted string",
- Text: `Instead of using fmt.Print(fmt.Sprintf(...)), one can use fmt.Printf(...).`,
- Since: "2020.1",
- },
-
- "S1039": {
- Title: "Unnecessary use of fmt.Sprint",
- Text: `Calling fmt.Sprint with a single string argument is unnecessary and identical to using the string directly.`,
- Since: "2020.1",
- },
-}
diff --git a/vendor/honnef.co/go/tools/simple/lint.go b/vendor/honnef.co/go/tools/simple/lint.go
deleted file mode 100644
index 91571b6ee76..00000000000
--- a/vendor/honnef.co/go/tools/simple/lint.go
+++ /dev/null
@@ -1,1868 +0,0 @@
-// Package simple contains a linter for Go source code.
-package simple // import "honnef.co/go/tools/simple"
-
-import (
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
- "path/filepath"
- "reflect"
- "sort"
- "strings"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/types/typeutil"
- . "honnef.co/go/tools/arg"
- "honnef.co/go/tools/code"
- "honnef.co/go/tools/edit"
- "honnef.co/go/tools/internal/passes/buildir"
- "honnef.co/go/tools/internal/sharedcheck"
- . "honnef.co/go/tools/lint/lintdsl"
- "honnef.co/go/tools/pattern"
- "honnef.co/go/tools/report"
-)
-
-var (
- checkSingleCaseSelectQ1 = pattern.MustParse(`
- (ForStmt
- nil nil nil
- select@(SelectStmt
- (CommClause
- (Or
- (UnaryExpr "<-" _)
- (AssignStmt _ _ (UnaryExpr "<-" _)))
- _)))`)
- checkSingleCaseSelectQ2 = pattern.MustParse(`(SelectStmt (CommClause _ _))`)
-)
-
-func CheckSingleCaseSelect(pass *analysis.Pass) (interface{}, error) {
- seen := map[ast.Node]struct{}{}
- fn := func(node ast.Node) {
- if m, ok := Match(pass, checkSingleCaseSelectQ1, node); ok {
- seen[m.State["select"].(ast.Node)] = struct{}{}
- report.Report(pass, node, "should use for range instead of for { select {} }", report.FilterGenerated())
- } else if _, ok := Match(pass, checkSingleCaseSelectQ2, node); ok {
- if _, ok := seen[node]; !ok {
- report.Report(pass, node, "should use a simple channel send/receive instead of select with a single case",
- report.ShortRange(),
- report.FilterGenerated())
- }
- }
- }
- code.Preorder(pass, fn, (*ast.ForStmt)(nil), (*ast.SelectStmt)(nil))
- return nil, nil
-}
-
-var (
- checkLoopCopyQ = pattern.MustParse(`
- (Or
- (RangeStmt
- key value ":=" src@(Ident _)
- [(AssignStmt
- (IndexExpr dst@(Ident _) key)
- "="
- value)])
- (RangeStmt
- key nil ":=" src@(Ident _)
- [(AssignStmt
- (IndexExpr dst@(Ident _) key)
- "="
- (IndexExpr src key))]))`)
- checkLoopCopyR = pattern.MustParse(`(CallExpr (Ident "copy") [dst src])`)
-)
-
-func CheckLoopCopy(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- m, edits, ok := MatchAndEdit(pass, checkLoopCopyQ, checkLoopCopyR, node)
- if !ok {
- return
- }
- t1 := pass.TypesInfo.TypeOf(m.State["src"].(*ast.Ident))
- t2 := pass.TypesInfo.TypeOf(m.State["dst"].(*ast.Ident))
- if _, ok := t1.Underlying().(*types.Slice); !ok {
- return
- }
- if !types.Identical(t1, t2) {
- return
- }
-
- tv, err := types.Eval(pass.Fset, pass.Pkg, node.Pos(), "copy")
- if err == nil && tv.IsBuiltin() {
- report.Report(pass, node,
- "should use copy() instead of a loop",
- report.ShortRange(),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("replace loop with call to copy()", edits...)))
- } else {
- report.Report(pass, node, "should use copy() instead of a loop", report.FilterGenerated())
- }
- }
- code.Preorder(pass, fn, (*ast.RangeStmt)(nil))
- return nil, nil
-}
-
-func CheckIfBoolCmp(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if code.IsInTest(pass, node) {
- return
- }
-
- expr := node.(*ast.BinaryExpr)
- if expr.Op != token.EQL && expr.Op != token.NEQ {
- return
- }
- x := code.IsBoolConst(pass, expr.X)
- y := code.IsBoolConst(pass, expr.Y)
- if !x && !y {
- return
- }
- var other ast.Expr
- var val bool
- if x {
- val = code.BoolConst(pass, expr.X)
- other = expr.Y
- } else {
- val = code.BoolConst(pass, expr.Y)
- other = expr.X
- }
- basic, ok := pass.TypesInfo.TypeOf(other).Underlying().(*types.Basic)
- if !ok || basic.Kind() != types.Bool {
- return
- }
- op := ""
- if (expr.Op == token.EQL && !val) || (expr.Op == token.NEQ && val) {
- op = "!"
- }
- r := op + report.Render(pass, other)
- l1 := len(r)
- r = strings.TrimLeft(r, "!")
- if (l1-len(r))%2 == 1 {
- r = "!" + r
- }
- report.Report(pass, expr, fmt.Sprintf("should omit comparison to bool constant, can be simplified to %s", r),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("simplify bool comparison", edit.ReplaceWithString(pass.Fset, expr, r))))
- }
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
-
-var (
- checkBytesBufferConversionsQ = pattern.MustParse(`(CallExpr _ [(CallExpr sel@(SelectorExpr recv _) [])])`)
- checkBytesBufferConversionsRs = pattern.MustParse(`(CallExpr (SelectorExpr recv (Ident "String")) [])`)
- checkBytesBufferConversionsRb = pattern.MustParse(`(CallExpr (SelectorExpr recv (Ident "Bytes")) [])`)
-)
-
-func CheckBytesBufferConversions(pass *analysis.Pass) (interface{}, error) {
- if pass.Pkg.Path() == "bytes" || pass.Pkg.Path() == "bytes_test" {
- // The bytes package can use itself however it wants
- return nil, nil
- }
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkBytesBufferConversionsQ, node)
- if !ok {
- return
- }
- call := node.(*ast.CallExpr)
- sel := m.State["sel"].(*ast.SelectorExpr)
-
- typ := pass.TypesInfo.TypeOf(call.Fun)
- if typ == types.Universe.Lookup("string").Type() && code.IsCallToAST(pass, call.Args[0], "(*bytes.Buffer).Bytes") {
- report.Report(pass, call, fmt.Sprintf("should use %v.String() instead of %v", report.Render(pass, sel.X), report.Render(pass, call)),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("simplify conversion", edit.ReplaceWithPattern(pass, checkBytesBufferConversionsRs, m.State, node))))
- } else if typ, ok := typ.(*types.Slice); ok && typ.Elem() == types.Universe.Lookup("byte").Type() && code.IsCallToAST(pass, call.Args[0], "(*bytes.Buffer).String") {
- report.Report(pass, call, fmt.Sprintf("should use %v.Bytes() instead of %v", report.Render(pass, sel.X), report.Render(pass, call)),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("simplify conversion", edit.ReplaceWithPattern(pass, checkBytesBufferConversionsRb, m.State, node))))
- }
-
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-func CheckStringsContains(pass *analysis.Pass) (interface{}, error) {
- // map of value to token to bool value
- allowed := map[int64]map[token.Token]bool{
- -1: {token.GTR: true, token.NEQ: true, token.EQL: false},
- 0: {token.GEQ: true, token.LSS: false},
- }
- fn := func(node ast.Node) {
- expr := node.(*ast.BinaryExpr)
- switch expr.Op {
- case token.GEQ, token.GTR, token.NEQ, token.LSS, token.EQL:
- default:
- return
- }
-
- value, ok := code.ExprToInt(pass, expr.Y)
- if !ok {
- return
- }
-
- allowedOps, ok := allowed[value]
- if !ok {
- return
- }
- b, ok := allowedOps[expr.Op]
- if !ok {
- return
- }
-
- call, ok := expr.X.(*ast.CallExpr)
- if !ok {
- return
- }
- sel, ok := call.Fun.(*ast.SelectorExpr)
- if !ok {
- return
- }
- pkgIdent, ok := sel.X.(*ast.Ident)
- if !ok {
- return
- }
- funIdent := sel.Sel
- if pkgIdent.Name != "strings" && pkgIdent.Name != "bytes" {
- return
- }
-
- var r ast.Expr
- switch funIdent.Name {
- case "IndexRune":
- r = &ast.SelectorExpr{
- X: pkgIdent,
- Sel: &ast.Ident{Name: "ContainsRune"},
- }
- case "IndexAny":
- r = &ast.SelectorExpr{
- X: pkgIdent,
- Sel: &ast.Ident{Name: "ContainsAny"},
- }
- case "Index":
- r = &ast.SelectorExpr{
- X: pkgIdent,
- Sel: &ast.Ident{Name: "Contains"},
- }
- default:
- return
- }
-
- r = &ast.CallExpr{
- Fun: r,
- Args: call.Args,
- }
- if !b {
- r = &ast.UnaryExpr{
- Op: token.NOT,
- X: r,
- }
- }
-
- report.Report(pass, node, fmt.Sprintf("should use %s instead", report.Render(pass, r)),
- report.FilterGenerated(),
- report.Fixes(edit.Fix(fmt.Sprintf("simplify use of %s", report.Render(pass, call.Fun)), edit.ReplaceWithNode(pass.Fset, node, r))))
- }
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
-
-var (
- checkBytesCompareQ = pattern.MustParse(`(BinaryExpr (CallExpr (Function "bytes.Compare") args) op@(Or "==" "!=") (BasicLit "INT" "0"))`)
- checkBytesCompareRn = pattern.MustParse(`(CallExpr (SelectorExpr (Ident "bytes") (Ident "Equal")) args)`)
- checkBytesCompareRe = pattern.MustParse(`(UnaryExpr "!" (CallExpr (SelectorExpr (Ident "bytes") (Ident "Equal")) args))`)
-)
-
-func CheckBytesCompare(pass *analysis.Pass) (interface{}, error) {
- if pass.Pkg.Path() == "bytes" || pass.Pkg.Path() == "bytes_test" {
- // the bytes package is free to use bytes.Compare as it sees fit
- return nil, nil
- }
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkBytesCompareQ, node)
- if !ok {
- return
- }
-
- args := report.RenderArgs(pass, m.State["args"].([]ast.Expr))
- prefix := ""
- if m.State["op"].(token.Token) == token.NEQ {
- prefix = "!"
- }
-
- var fix analysis.SuggestedFix
- switch tok := m.State["op"].(token.Token); tok {
- case token.EQL:
- fix = edit.Fix("simplify use of bytes.Compare", edit.ReplaceWithPattern(pass, checkBytesCompareRe, m.State, node))
- case token.NEQ:
- fix = edit.Fix("simplify use of bytes.Compare", edit.ReplaceWithPattern(pass, checkBytesCompareRn, m.State, node))
- default:
- panic(fmt.Sprintf("unexpected token %v", tok))
- }
- report.Report(pass, node, fmt.Sprintf("should use %sbytes.Equal(%s) instead", prefix, args), report.FilterGenerated(), report.Fixes(fix))
- }
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
-
-func CheckForTrue(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- loop := node.(*ast.ForStmt)
- if loop.Init != nil || loop.Post != nil {
- return
- }
- if !code.IsBoolConst(pass, loop.Cond) || !code.BoolConst(pass, loop.Cond) {
- return
- }
- report.Report(pass, loop, "should use for {} instead of for true {}",
- report.ShortRange(),
- report.FilterGenerated())
- }
- code.Preorder(pass, fn, (*ast.ForStmt)(nil))
- return nil, nil
-}
-
-func CheckRegexpRaw(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- call := node.(*ast.CallExpr)
- if !code.IsCallToAnyAST(pass, call, "regexp.MustCompile", "regexp.Compile") {
- return
- }
- sel, ok := call.Fun.(*ast.SelectorExpr)
- if !ok {
- return
- }
- lit, ok := call.Args[Arg("regexp.Compile.expr")].(*ast.BasicLit)
- if !ok {
- // TODO(dominikh): support string concat, maybe support constants
- return
- }
- if lit.Kind != token.STRING {
- // invalid function call
- return
- }
- if lit.Value[0] != '"' {
- // already a raw string
- return
- }
- val := lit.Value
- if !strings.Contains(val, `\\`) {
- return
- }
- if strings.Contains(val, "`") {
- return
- }
-
- bs := false
- for _, c := range val {
- if !bs && c == '\\' {
- bs = true
- continue
- }
- if bs && c == '\\' {
- bs = false
- continue
- }
- if bs {
- // backslash followed by non-backslash -> escape sequence
- return
- }
- }
-
- report.Report(pass, call, fmt.Sprintf("should use raw string (`...`) with regexp.%s to avoid having to escape twice", sel.Sel.Name), report.FilterGenerated())
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var (
- checkIfReturnQIf = pattern.MustParse(`(IfStmt nil cond [(ReturnStmt [ret@(Ident _)])] nil)`)
- checkIfReturnQRet = pattern.MustParse(`(ReturnStmt [ret@(Ident _)])`)
-)
-
-func CheckIfReturn(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- block := node.(*ast.BlockStmt)
- l := len(block.List)
- if l < 2 {
- return
- }
- n1, n2 := block.List[l-2], block.List[l-1]
-
- if len(block.List) >= 3 {
- if _, ok := block.List[l-3].(*ast.IfStmt); ok {
- // Do not flag a series of if statements
- return
- }
- }
- m1, ok := Match(pass, checkIfReturnQIf, n1)
- if !ok {
- return
- }
- m2, ok := Match(pass, checkIfReturnQRet, n2)
- if !ok {
- return
- }
-
- if op, ok := m1.State["cond"].(*ast.BinaryExpr); ok {
- switch op.Op {
- case token.EQL, token.LSS, token.GTR, token.NEQ, token.LEQ, token.GEQ:
- default:
- return
- }
- }
-
- ret1 := m1.State["ret"].(*ast.Ident)
- if !code.IsBoolConst(pass, ret1) {
- return
- }
- ret2 := m2.State["ret"].(*ast.Ident)
- if !code.IsBoolConst(pass, ret2) {
- return
- }
-
- if ret1.Name == ret2.Name {
- // we want the function to return true and false, not the
- // same value both times.
- return
- }
-
- cond := m1.State["cond"].(ast.Expr)
- origCond := cond
- if ret1.Name == "false" {
- cond = negate(cond)
- }
- report.Report(pass, n1,
- fmt.Sprintf("should use 'return %s' instead of 'if %s { return %s }; return %s'",
- report.Render(pass, cond),
- report.Render(pass, origCond), report.Render(pass, ret1), report.Render(pass, ret2)),
- report.FilterGenerated())
- }
- code.Preorder(pass, fn, (*ast.BlockStmt)(nil))
- return nil, nil
-}
-
-func negate(expr ast.Expr) ast.Expr {
- switch expr := expr.(type) {
- case *ast.BinaryExpr:
- out := *expr
- switch expr.Op {
- case token.EQL:
- out.Op = token.NEQ
- case token.LSS:
- out.Op = token.GEQ
- case token.GTR:
- out.Op = token.LEQ
- case token.NEQ:
- out.Op = token.EQL
- case token.LEQ:
- out.Op = token.GTR
- case token.GEQ:
- out.Op = token.LEQ
- }
- return &out
- case *ast.Ident, *ast.CallExpr, *ast.IndexExpr:
- return &ast.UnaryExpr{
- Op: token.NOT,
- X: expr,
- }
- default:
- return &ast.UnaryExpr{
- Op: token.NOT,
- X: &ast.ParenExpr{
- X: expr,
- },
- }
- }
-}
-
-// CheckRedundantNilCheckWithLen checks for the following redundant nil-checks:
-//
-// if x == nil || len(x) == 0 {}
-// if x != nil && len(x) != 0 {}
-// if x != nil && len(x) == N {} (where N != 0)
-// if x != nil && len(x) > N {}
-// if x != nil && len(x) >= N {} (where N != 0)
-//
-func CheckRedundantNilCheckWithLen(pass *analysis.Pass) (interface{}, error) {
- isConstZero := func(expr ast.Expr) (isConst bool, isZero bool) {
- _, ok := expr.(*ast.BasicLit)
- if ok {
- return true, code.IsIntLiteral(expr, "0")
- }
- id, ok := expr.(*ast.Ident)
- if !ok {
- return false, false
- }
- c, ok := pass.TypesInfo.ObjectOf(id).(*types.Const)
- if !ok {
- return false, false
- }
- return true, c.Val().Kind() == constant.Int && c.Val().String() == "0"
- }
-
- fn := func(node ast.Node) {
- // check that expr is "x || y" or "x && y"
- expr := node.(*ast.BinaryExpr)
- if expr.Op != token.LOR && expr.Op != token.LAND {
- return
- }
- eqNil := expr.Op == token.LOR
-
- // check that x is "xx == nil" or "xx != nil"
- x, ok := expr.X.(*ast.BinaryExpr)
- if !ok {
- return
- }
- if eqNil && x.Op != token.EQL {
- return
- }
- if !eqNil && x.Op != token.NEQ {
- return
- }
- xx, ok := x.X.(*ast.Ident)
- if !ok {
- return
- }
- if !code.IsNil(pass, x.Y) {
- return
- }
-
- // check that y is "len(xx) == 0" or "len(xx) ... "
- y, ok := expr.Y.(*ast.BinaryExpr)
- if !ok {
- return
- }
- if eqNil && y.Op != token.EQL { // must be len(xx) *==* 0
- return
- }
- yx, ok := y.X.(*ast.CallExpr)
- if !ok {
- return
- }
- yxFun, ok := yx.Fun.(*ast.Ident)
- if !ok || yxFun.Name != "len" || len(yx.Args) != 1 {
- return
- }
- yxArg, ok := yx.Args[Arg("len.v")].(*ast.Ident)
- if !ok {
- return
- }
- if yxArg.Name != xx.Name {
- return
- }
-
- if eqNil && !code.IsIntLiteral(y.Y, "0") { // must be len(x) == *0*
- return
- }
-
- if !eqNil {
- isConst, isZero := isConstZero(y.Y)
- if !isConst {
- return
- }
- switch y.Op {
- case token.EQL:
- // avoid false positive for "xx != nil && len(xx) == 0"
- if isZero {
- return
- }
- case token.GEQ:
- // avoid false positive for "xx != nil && len(xx) >= 0"
- if isZero {
- return
- }
- case token.NEQ:
- // avoid false positive for "xx != nil && len(xx) != "
- if !isZero {
- return
- }
- case token.GTR:
- // ok
- default:
- return
- }
- }
-
- // finally check that xx type is one of array, slice, map or chan
- // this is to prevent false positive in case if xx is a pointer to an array
- var nilType string
- switch pass.TypesInfo.TypeOf(xx).(type) {
- case *types.Slice:
- nilType = "nil slices"
- case *types.Map:
- nilType = "nil maps"
- case *types.Chan:
- nilType = "nil channels"
- default:
- return
- }
- report.Report(pass, expr, fmt.Sprintf("should omit nil check; len() for %s is defined as zero", nilType), report.FilterGenerated())
- }
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
-
-var checkSlicingQ = pattern.MustParse(`(SliceExpr x@(Object _) low (CallExpr (Builtin "len") [x]) nil)`)
-
-func CheckSlicing(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if _, ok := Match(pass, checkSlicingQ, node); ok {
- expr := node.(*ast.SliceExpr)
- report.Report(pass, expr.High,
- "should omit second index in slice, s[a:len(s)] is identical to s[a:]",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("simplify slice expression", edit.Delete(expr.High))))
- }
- }
- code.Preorder(pass, fn, (*ast.SliceExpr)(nil))
- return nil, nil
-}
-
-func refersTo(pass *analysis.Pass, expr ast.Expr, ident types.Object) bool {
- found := false
- fn := func(node ast.Node) bool {
- ident2, ok := node.(*ast.Ident)
- if !ok {
- return true
- }
- if ident == pass.TypesInfo.ObjectOf(ident2) {
- found = true
- return false
- }
- return true
- }
- ast.Inspect(expr, fn)
- return found
-}
-
-var checkLoopAppendQ = pattern.MustParse(`
- (RangeStmt
- (Ident "_")
- val@(Object _)
- _
- x
- [(AssignStmt [lhs] "=" [(CallExpr (Builtin "append") [lhs val])])]) `)
-
-func CheckLoopAppend(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkLoopAppendQ, node)
- if !ok {
- return
- }
-
- val := m.State["val"].(types.Object)
- if refersTo(pass, m.State["lhs"].(ast.Expr), val) {
- return
- }
-
- src := pass.TypesInfo.TypeOf(m.State["x"].(ast.Expr))
- dst := pass.TypesInfo.TypeOf(m.State["lhs"].(ast.Expr))
- if !types.Identical(src, dst) {
- return
- }
-
- r := &ast.AssignStmt{
- Lhs: []ast.Expr{m.State["lhs"].(ast.Expr)},
- Tok: token.ASSIGN,
- Rhs: []ast.Expr{
- &ast.CallExpr{
- Fun: &ast.Ident{Name: "append"},
- Args: []ast.Expr{
- m.State["lhs"].(ast.Expr),
- m.State["x"].(ast.Expr),
- },
- Ellipsis: 1,
- },
- },
- }
-
- report.Report(pass, node, fmt.Sprintf("should replace loop with %s", report.Render(pass, r)),
- report.ShortRange(),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("replace loop with call to append", edit.ReplaceWithNode(pass.Fset, node, r))))
- }
- code.Preorder(pass, fn, (*ast.RangeStmt)(nil))
- return nil, nil
-}
-
-var (
- checkTimeSinceQ = pattern.MustParse(`(CallExpr (SelectorExpr (CallExpr (Function "time.Now") []) (Function "(time.Time).Sub")) [arg])`)
- checkTimeSinceR = pattern.MustParse(`(CallExpr (SelectorExpr (Ident "time") (Ident "Since")) [arg])`)
-)
-
-func CheckTimeSince(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if _, edits, ok := MatchAndEdit(pass, checkTimeSinceQ, checkTimeSinceR, node); ok {
- report.Report(pass, node, "should use time.Since instead of time.Now().Sub",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("replace with call to time.Since", edits...)))
- }
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var (
- checkTimeUntilQ = pattern.MustParse(`(CallExpr (Function "(time.Time).Sub") [(CallExpr (Function "time.Now") [])])`)
- checkTimeUntilR = pattern.MustParse(`(CallExpr (SelectorExpr (Ident "time") (Ident "Until")) [arg])`)
-)
-
-func CheckTimeUntil(pass *analysis.Pass) (interface{}, error) {
- if !code.IsGoVersion(pass, 8) {
- return nil, nil
- }
- fn := func(node ast.Node) {
- if _, ok := Match(pass, checkTimeUntilQ, node); ok {
- if sel, ok := node.(*ast.CallExpr).Fun.(*ast.SelectorExpr); ok {
- r := pattern.NodeToAST(checkTimeUntilR.Root, map[string]interface{}{"arg": sel.X}).(ast.Node)
- report.Report(pass, node, "should use time.Until instead of t.Sub(time.Now())",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("replace with call to time.Until", edit.ReplaceWithNode(pass.Fset, node, r))))
- } else {
- report.Report(pass, node, "should use time.Until instead of t.Sub(time.Now())", report.FilterGenerated())
- }
- }
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var (
- checkUnnecessaryBlankQ1 = pattern.MustParse(`
- (AssignStmt
- [_ (Ident "_")]
- _
- (Or
- (IndexExpr _ _)
- (UnaryExpr "<-" _))) `)
- checkUnnecessaryBlankQ2 = pattern.MustParse(`
- (AssignStmt
- (Ident "_") _ recv@(UnaryExpr "<-" _))`)
-)
-
-func CheckUnnecessaryBlank(pass *analysis.Pass) (interface{}, error) {
- fn1 := func(node ast.Node) {
- if _, ok := Match(pass, checkUnnecessaryBlankQ1, node); ok {
- r := *node.(*ast.AssignStmt)
- r.Lhs = r.Lhs[0:1]
- report.Report(pass, node, "unnecessary assignment to the blank identifier",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("remove assignment to blank identifier", edit.ReplaceWithNode(pass.Fset, node, &r))))
- } else if m, ok := Match(pass, checkUnnecessaryBlankQ2, node); ok {
- report.Report(pass, node, "unnecessary assignment to the blank identifier",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("simplify channel receive operation", edit.ReplaceWithNode(pass.Fset, node, m.State["recv"].(ast.Node)))))
- }
- }
-
- fn3 := func(node ast.Node) {
- rs := node.(*ast.RangeStmt)
-
- // for _
- if rs.Value == nil && code.IsBlank(rs.Key) {
- report.Report(pass, rs.Key, "unnecessary assignment to the blank identifier",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("remove assignment to blank identifier", edit.Delete(edit.Range{rs.Key.Pos(), rs.TokPos + 1}))))
- }
-
- // for _, _
- if code.IsBlank(rs.Key) && code.IsBlank(rs.Value) {
- // FIXME we should mark both key and value
- report.Report(pass, rs.Key, "unnecessary assignment to the blank identifier",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("remove assignment to blank identifier", edit.Delete(edit.Range{rs.Key.Pos(), rs.TokPos + 1}))))
- }
-
- // for x, _
- if !code.IsBlank(rs.Key) && code.IsBlank(rs.Value) {
- report.Report(pass, rs.Value, "unnecessary assignment to the blank identifier",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("remove assignment to blank identifier", edit.Delete(edit.Range{rs.Key.End(), rs.Value.End()}))))
- }
- }
-
- code.Preorder(pass, fn1, (*ast.AssignStmt)(nil))
- if code.IsGoVersion(pass, 4) {
- code.Preorder(pass, fn3, (*ast.RangeStmt)(nil))
- }
- return nil, nil
-}
-
-func CheckSimplerStructConversion(pass *analysis.Pass) (interface{}, error) {
- var skip ast.Node
- fn := func(node ast.Node) {
- // Do not suggest type conversion between pointers
- if unary, ok := node.(*ast.UnaryExpr); ok && unary.Op == token.AND {
- if lit, ok := unary.X.(*ast.CompositeLit); ok {
- skip = lit
- }
- return
- }
-
- if node == skip {
- return
- }
-
- lit, ok := node.(*ast.CompositeLit)
- if !ok {
- return
- }
- typ1, _ := pass.TypesInfo.TypeOf(lit.Type).(*types.Named)
- if typ1 == nil {
- return
- }
- s1, ok := typ1.Underlying().(*types.Struct)
- if !ok {
- return
- }
-
- var typ2 *types.Named
- var ident *ast.Ident
- getSelType := func(expr ast.Expr) (types.Type, *ast.Ident, bool) {
- sel, ok := expr.(*ast.SelectorExpr)
- if !ok {
- return nil, nil, false
- }
- ident, ok := sel.X.(*ast.Ident)
- if !ok {
- return nil, nil, false
- }
- typ := pass.TypesInfo.TypeOf(sel.X)
- return typ, ident, typ != nil
- }
- if len(lit.Elts) == 0 {
- return
- }
- if s1.NumFields() != len(lit.Elts) {
- return
- }
- for i, elt := range lit.Elts {
- var t types.Type
- var id *ast.Ident
- var ok bool
- switch elt := elt.(type) {
- case *ast.SelectorExpr:
- t, id, ok = getSelType(elt)
- if !ok {
- return
- }
- if i >= s1.NumFields() || s1.Field(i).Name() != elt.Sel.Name {
- return
- }
- case *ast.KeyValueExpr:
- var sel *ast.SelectorExpr
- sel, ok = elt.Value.(*ast.SelectorExpr)
- if !ok {
- return
- }
-
- if elt.Key.(*ast.Ident).Name != sel.Sel.Name {
- return
- }
- t, id, ok = getSelType(elt.Value)
- }
- if !ok {
- return
- }
- // All fields must be initialized from the same object
- if ident != nil && ident.Obj != id.Obj {
- return
- }
- typ2, _ = t.(*types.Named)
- if typ2 == nil {
- return
- }
- ident = id
- }
-
- if typ2 == nil {
- return
- }
-
- if typ1.Obj().Pkg() != typ2.Obj().Pkg() {
- // Do not suggest type conversions between different
- // packages. Types in different packages might only match
- // by coincidence. Furthermore, if the dependency ever
- // adds more fields to its type, it could break the code
- // that relies on the type conversion to work.
- return
- }
-
- s2, ok := typ2.Underlying().(*types.Struct)
- if !ok {
- return
- }
- if typ1 == typ2 {
- return
- }
- if code.IsGoVersion(pass, 8) {
- if !types.IdenticalIgnoreTags(s1, s2) {
- return
- }
- } else {
- if !types.Identical(s1, s2) {
- return
- }
- }
-
- r := &ast.CallExpr{
- Fun: lit.Type,
- Args: []ast.Expr{ident},
- }
- report.Report(pass, node,
- fmt.Sprintf("should convert %s (type %s) to %s instead of using struct literal", ident.Name, typ2.Obj().Name(), typ1.Obj().Name()),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("use type conversion", edit.ReplaceWithNode(pass.Fset, node, r))))
- }
- code.Preorder(pass, fn, (*ast.UnaryExpr)(nil), (*ast.CompositeLit)(nil))
- return nil, nil
-}
-
-func CheckTrim(pass *analysis.Pass) (interface{}, error) {
- sameNonDynamic := func(node1, node2 ast.Node) bool {
- if reflect.TypeOf(node1) != reflect.TypeOf(node2) {
- return false
- }
-
- switch node1 := node1.(type) {
- case *ast.Ident:
- return node1.Obj == node2.(*ast.Ident).Obj
- case *ast.SelectorExpr:
- return report.Render(pass, node1) == report.Render(pass, node2)
- case *ast.IndexExpr:
- return report.Render(pass, node1) == report.Render(pass, node2)
- }
- return false
- }
-
- isLenOnIdent := func(fn ast.Expr, ident ast.Expr) bool {
- call, ok := fn.(*ast.CallExpr)
- if !ok {
- return false
- }
- if fn, ok := call.Fun.(*ast.Ident); !ok || fn.Name != "len" {
- return false
- }
- if len(call.Args) != 1 {
- return false
- }
- return sameNonDynamic(call.Args[Arg("len.v")], ident)
- }
-
- fn := func(node ast.Node) {
- var pkg string
- var fun string
-
- ifstmt := node.(*ast.IfStmt)
- if ifstmt.Init != nil {
- return
- }
- if ifstmt.Else != nil {
- return
- }
- if len(ifstmt.Body.List) != 1 {
- return
- }
- condCall, ok := ifstmt.Cond.(*ast.CallExpr)
- if !ok {
- return
- }
-
- condCallName := code.CallNameAST(pass, condCall)
- switch condCallName {
- case "strings.HasPrefix":
- pkg = "strings"
- fun = "HasPrefix"
- case "strings.HasSuffix":
- pkg = "strings"
- fun = "HasSuffix"
- case "strings.Contains":
- pkg = "strings"
- fun = "Contains"
- case "bytes.HasPrefix":
- pkg = "bytes"
- fun = "HasPrefix"
- case "bytes.HasSuffix":
- pkg = "bytes"
- fun = "HasSuffix"
- case "bytes.Contains":
- pkg = "bytes"
- fun = "Contains"
- default:
- return
- }
-
- assign, ok := ifstmt.Body.List[0].(*ast.AssignStmt)
- if !ok {
- return
- }
- if assign.Tok != token.ASSIGN {
- return
- }
- if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 {
- return
- }
- if !sameNonDynamic(condCall.Args[0], assign.Lhs[0]) {
- return
- }
-
- switch rhs := assign.Rhs[0].(type) {
- case *ast.CallExpr:
- if len(rhs.Args) < 2 || !sameNonDynamic(condCall.Args[0], rhs.Args[0]) || !sameNonDynamic(condCall.Args[1], rhs.Args[1]) {
- return
- }
-
- rhsName := code.CallNameAST(pass, rhs)
- if condCallName == "strings.HasPrefix" && rhsName == "strings.TrimPrefix" ||
- condCallName == "strings.HasSuffix" && rhsName == "strings.TrimSuffix" ||
- condCallName == "strings.Contains" && rhsName == "strings.Replace" ||
- condCallName == "bytes.HasPrefix" && rhsName == "bytes.TrimPrefix" ||
- condCallName == "bytes.HasSuffix" && rhsName == "bytes.TrimSuffix" ||
- condCallName == "bytes.Contains" && rhsName == "bytes.Replace" {
- report.Report(pass, ifstmt, fmt.Sprintf("should replace this if statement with an unconditional %s", rhsName), report.FilterGenerated())
- }
- return
- case *ast.SliceExpr:
- slice := rhs
- if !ok {
- return
- }
- if slice.Slice3 {
- return
- }
- if !sameNonDynamic(slice.X, condCall.Args[0]) {
- return
- }
- var index ast.Expr
- switch fun {
- case "HasPrefix":
- // TODO(dh) We could detect a High that is len(s), but another
- // rule will already flag that, anyway.
- if slice.High != nil {
- return
- }
- index = slice.Low
- case "HasSuffix":
- if slice.Low != nil {
- n, ok := code.ExprToInt(pass, slice.Low)
- if !ok || n != 0 {
- return
- }
- }
- index = slice.High
- }
-
- switch index := index.(type) {
- case *ast.CallExpr:
- if fun != "HasPrefix" {
- return
- }
- if fn, ok := index.Fun.(*ast.Ident); !ok || fn.Name != "len" {
- return
- }
- if len(index.Args) != 1 {
- return
- }
- id3 := index.Args[Arg("len.v")]
- switch oid3 := condCall.Args[1].(type) {
- case *ast.BasicLit:
- if pkg != "strings" {
- return
- }
- lit, ok := id3.(*ast.BasicLit)
- if !ok {
- return
- }
- s1, ok1 := code.ExprToString(pass, lit)
- s2, ok2 := code.ExprToString(pass, condCall.Args[1])
- if !ok1 || !ok2 || s1 != s2 {
- return
- }
- default:
- if !sameNonDynamic(id3, oid3) {
- return
- }
- }
- case *ast.BasicLit, *ast.Ident:
- if fun != "HasPrefix" {
- return
- }
- if pkg != "strings" {
- return
- }
- string, ok1 := code.ExprToString(pass, condCall.Args[1])
- int, ok2 := code.ExprToInt(pass, slice.Low)
- if !ok1 || !ok2 || int != int64(len(string)) {
- return
- }
- case *ast.BinaryExpr:
- if fun != "HasSuffix" {
- return
- }
- if index.Op != token.SUB {
- return
- }
- if !isLenOnIdent(index.X, condCall.Args[0]) ||
- !isLenOnIdent(index.Y, condCall.Args[1]) {
- return
- }
- default:
- return
- }
-
- var replacement string
- switch fun {
- case "HasPrefix":
- replacement = "TrimPrefix"
- case "HasSuffix":
- replacement = "TrimSuffix"
- }
- report.Report(pass, ifstmt, fmt.Sprintf("should replace this if statement with an unconditional %s.%s", pkg, replacement),
- report.ShortRange(),
- report.FilterGenerated())
- }
- }
- code.Preorder(pass, fn, (*ast.IfStmt)(nil))
- return nil, nil
-}
-
-var (
- checkLoopSlideQ = pattern.MustParse(`
- (ForStmt
- (AssignStmt initvar@(Ident _) _ (BasicLit "INT" "0"))
- (BinaryExpr initvar "<" limit@(Ident _))
- (IncDecStmt initvar "++")
- [(AssignStmt
- (IndexExpr slice@(Ident _) initvar)
- "="
- (IndexExpr slice (BinaryExpr offset@(Ident _) "+" initvar)))])`)
- checkLoopSlideR = pattern.MustParse(`
- (CallExpr
- (Ident "copy")
- [(SliceExpr slice nil limit nil)
- (SliceExpr slice offset nil nil)])`)
-)
-
-func CheckLoopSlide(pass *analysis.Pass) (interface{}, error) {
- // TODO(dh): detect bs[i+offset] in addition to bs[offset+i]
- // TODO(dh): consider merging this function with LintLoopCopy
- // TODO(dh): detect length that is an expression, not a variable name
- // TODO(dh): support sliding to a different offset than the beginning of the slice
-
- fn := func(node ast.Node) {
- loop := node.(*ast.ForStmt)
- m, edits, ok := MatchAndEdit(pass, checkLoopSlideQ, checkLoopSlideR, loop)
- if !ok {
- return
- }
- if _, ok := pass.TypesInfo.TypeOf(m.State["slice"].(*ast.Ident)).Underlying().(*types.Slice); !ok {
- return
- }
-
- report.Report(pass, loop, "should use copy() instead of loop for sliding slice elements",
- report.ShortRange(),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("use copy() instead of loop", edits...)))
- }
- code.Preorder(pass, fn, (*ast.ForStmt)(nil))
- return nil, nil
-}
-
-var (
- checkMakeLenCapQ1 = pattern.MustParse(`(CallExpr (Builtin "make") [typ size@(BasicLit "INT" "0")])`)
- checkMakeLenCapQ2 = pattern.MustParse(`(CallExpr (Builtin "make") [typ size size])`)
-)
-
-func CheckMakeLenCap(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if pass.Pkg.Path() == "runtime_test" && filepath.Base(pass.Fset.Position(node.Pos()).Filename) == "map_test.go" {
- // special case of runtime tests testing map creation
- return
- }
- if m, ok := Match(pass, checkMakeLenCapQ1, node); ok {
- T := m.State["typ"].(ast.Expr)
- size := m.State["size"].(ast.Node)
- if _, ok := pass.TypesInfo.TypeOf(T).Underlying().(*types.Slice); ok {
- return
- }
- report.Report(pass, size, fmt.Sprintf("should use make(%s) instead", report.Render(pass, T)), report.FilterGenerated())
- } else if m, ok := Match(pass, checkMakeLenCapQ2, node); ok {
- // TODO(dh): don't consider sizes identical if they're
- // dynamic. for example: make(T, <-ch, <-ch).
- T := m.State["typ"].(ast.Expr)
- size := m.State["size"].(ast.Node)
- report.Report(pass, size,
- fmt.Sprintf("should use make(%s, %s) instead", report.Render(pass, T), report.Render(pass, size)),
- report.FilterGenerated())
- }
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var (
- checkAssertNotNilFn1Q = pattern.MustParse(`
- (IfStmt
- (AssignStmt [(Ident "_") ok@(Object _)] _ [(TypeAssertExpr assert@(Object _) _)])
- (Or
- (BinaryExpr ok "&&" (BinaryExpr assert "!=" (Builtin "nil")))
- (BinaryExpr (BinaryExpr assert "!=" (Builtin "nil")) "&&" ok))
- _
- _)`)
- checkAssertNotNilFn2Q = pattern.MustParse(`
- (IfStmt
- nil
- (BinaryExpr lhs@(Object _) "!=" (Builtin "nil"))
- [
- ifstmt@(IfStmt
- (AssignStmt [(Ident "_") ok@(Object _)] _ [(TypeAssertExpr lhs _)])
- ok
- _
- _)
- ]
- nil)`)
-)
-
-func CheckAssertNotNil(pass *analysis.Pass) (interface{}, error) {
- fn1 := func(node ast.Node) {
- m, ok := Match(pass, checkAssertNotNilFn1Q, node)
- if !ok {
- return
- }
- assert := m.State["assert"].(types.Object)
- assign := m.State["ok"].(types.Object)
- report.Report(pass, node, fmt.Sprintf("when %s is true, %s can't be nil", assign.Name(), assert.Name()),
- report.ShortRange(),
- report.FilterGenerated())
- }
- fn2 := func(node ast.Node) {
- m, ok := Match(pass, checkAssertNotNilFn2Q, node)
- if !ok {
- return
- }
- ifstmt := m.State["ifstmt"].(*ast.IfStmt)
- lhs := m.State["lhs"].(types.Object)
- assignIdent := m.State["ok"].(types.Object)
- report.Report(pass, ifstmt, fmt.Sprintf("when %s is true, %s can't be nil", assignIdent.Name(), lhs.Name()),
- report.ShortRange(),
- report.FilterGenerated())
- }
- code.Preorder(pass, fn1, (*ast.IfStmt)(nil))
- code.Preorder(pass, fn2, (*ast.IfStmt)(nil))
- return nil, nil
-}
-
-func CheckDeclareAssign(pass *analysis.Pass) (interface{}, error) {
- hasMultipleAssignments := func(root ast.Node, ident *ast.Ident) bool {
- num := 0
- ast.Inspect(root, func(node ast.Node) bool {
- if num >= 2 {
- return false
- }
- assign, ok := node.(*ast.AssignStmt)
- if !ok {
- return true
- }
- for _, lhs := range assign.Lhs {
- if oident, ok := lhs.(*ast.Ident); ok {
- if oident.Obj == ident.Obj {
- num++
- }
- }
- }
-
- return true
- })
- return num >= 2
- }
- fn := func(node ast.Node) {
- block := node.(*ast.BlockStmt)
- if len(block.List) < 2 {
- return
- }
- for i, stmt := range block.List[:len(block.List)-1] {
- _ = i
- decl, ok := stmt.(*ast.DeclStmt)
- if !ok {
- continue
- }
- gdecl, ok := decl.Decl.(*ast.GenDecl)
- if !ok || gdecl.Tok != token.VAR || len(gdecl.Specs) != 1 {
- continue
- }
- vspec, ok := gdecl.Specs[0].(*ast.ValueSpec)
- if !ok || len(vspec.Names) != 1 || len(vspec.Values) != 0 {
- continue
- }
-
- assign, ok := block.List[i+1].(*ast.AssignStmt)
- if !ok || assign.Tok != token.ASSIGN {
- continue
- }
- if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 {
- continue
- }
- ident, ok := assign.Lhs[0].(*ast.Ident)
- if !ok {
- continue
- }
- if vspec.Names[0].Obj != ident.Obj {
- continue
- }
-
- if refersTo(pass, assign.Rhs[0], pass.TypesInfo.ObjectOf(ident)) {
- continue
- }
- if hasMultipleAssignments(block, ident) {
- continue
- }
-
- r := &ast.GenDecl{
- Specs: []ast.Spec{
- &ast.ValueSpec{
- Names: vspec.Names,
- Values: []ast.Expr{assign.Rhs[0]},
- Type: vspec.Type,
- },
- },
- Tok: gdecl.Tok,
- }
- report.Report(pass, decl, "should merge variable declaration with assignment on next line",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("merge declaration with assignment", edit.ReplaceWithNode(pass.Fset, edit.Range{decl.Pos(), assign.End()}, r))))
- }
- }
- code.Preorder(pass, fn, (*ast.BlockStmt)(nil))
- return nil, nil
-}
-
-func CheckRedundantBreak(pass *analysis.Pass) (interface{}, error) {
- fn1 := func(node ast.Node) {
- clause := node.(*ast.CaseClause)
- if len(clause.Body) < 2 {
- return
- }
- branch, ok := clause.Body[len(clause.Body)-1].(*ast.BranchStmt)
- if !ok || branch.Tok != token.BREAK || branch.Label != nil {
- return
- }
- report.Report(pass, branch, "redundant break statement", report.FilterGenerated())
- }
- fn2 := func(node ast.Node) {
- var ret *ast.FieldList
- var body *ast.BlockStmt
- switch x := node.(type) {
- case *ast.FuncDecl:
- ret = x.Type.Results
- body = x.Body
- case *ast.FuncLit:
- ret = x.Type.Results
- body = x.Body
- default:
- ExhaustiveTypeSwitch(node)
- }
- // if the func has results, a return can't be redundant.
- // similarly, if there are no statements, there can be
- // no return.
- if ret != nil || body == nil || len(body.List) < 1 {
- return
- }
- rst, ok := body.List[len(body.List)-1].(*ast.ReturnStmt)
- if !ok {
- return
- }
- // we don't need to check rst.Results as we already
- // checked x.Type.Results to be nil.
- report.Report(pass, rst, "redundant return statement", report.FilterGenerated())
- }
- code.Preorder(pass, fn1, (*ast.CaseClause)(nil))
- code.Preorder(pass, fn2, (*ast.FuncDecl)(nil), (*ast.FuncLit)(nil))
- return nil, nil
-}
-
-func isStringer(T types.Type, msCache *typeutil.MethodSetCache) bool {
- ms := msCache.MethodSet(T)
- sel := ms.Lookup(nil, "String")
- if sel == nil {
- return false
- }
- fn, ok := sel.Obj().(*types.Func)
- if !ok {
- // should be unreachable
- return false
- }
- sig := fn.Type().(*types.Signature)
- if sig.Params().Len() != 0 {
- return false
- }
- if sig.Results().Len() != 1 {
- return false
- }
- if !code.IsType(sig.Results().At(0).Type(), "string") {
- return false
- }
- return true
-}
-
-var checkRedundantSprintfQ = pattern.MustParse(`(CallExpr (Function "fmt.Sprintf") [format arg])`)
-
-func CheckRedundantSprintf(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkRedundantSprintfQ, node)
- if !ok {
- return
- }
-
- format := m.State["format"].(ast.Expr)
- arg := m.State["arg"].(ast.Expr)
- if s, ok := code.ExprToString(pass, format); !ok || s != "%s" {
- return
- }
- typ := pass.TypesInfo.TypeOf(arg)
-
- irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg
- if types.TypeString(typ, nil) != "reflect.Value" && isStringer(typ, &irpkg.Prog.MethodSets) {
- replacement := &ast.CallExpr{
- Fun: &ast.SelectorExpr{
- X: arg,
- Sel: &ast.Ident{Name: "String"},
- },
- }
- report.Report(pass, node, "should use String() instead of fmt.Sprintf",
- report.Fixes(edit.Fix("replace with call to String method", edit.ReplaceWithNode(pass.Fset, node, replacement))))
- return
- }
-
- if typ.Underlying() == types.Universe.Lookup("string").Type() {
- if typ == types.Universe.Lookup("string").Type() {
- report.Report(pass, node, "the argument is already a string, there's no need to use fmt.Sprintf",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("remove unnecessary call to fmt.Sprintf", edit.ReplaceWithNode(pass.Fset, node, arg))))
- } else {
- replacement := &ast.CallExpr{
- Fun: &ast.Ident{Name: "string"},
- Args: []ast.Expr{arg},
- }
- report.Report(pass, node, "the argument's underlying type is a string, should use a simple conversion instead of fmt.Sprintf",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("replace with conversion to string", edit.ReplaceWithNode(pass.Fset, node, replacement))))
- }
- }
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var (
- checkErrorsNewSprintfQ = pattern.MustParse(`(CallExpr (Function "errors.New") [(CallExpr (Function "fmt.Sprintf") args)])`)
- checkErrorsNewSprintfR = pattern.MustParse(`(CallExpr (SelectorExpr (Ident "fmt") (Ident "Errorf")) args)`)
-)
-
-func CheckErrorsNewSprintf(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if _, edits, ok := MatchAndEdit(pass, checkErrorsNewSprintfQ, checkErrorsNewSprintfR, node); ok {
- // TODO(dh): the suggested fix may leave an unused import behind
- report.Report(pass, node, "should use fmt.Errorf(...) instead of errors.New(fmt.Sprintf(...))",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("use fmt.Errorf", edits...)))
- }
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) {
- return sharedcheck.CheckRangeStringRunes(pass)
-}
-
-var checkNilCheckAroundRangeQ = pattern.MustParse(`
- (IfStmt
- nil
- (BinaryExpr x@(Object _) "!=" (Builtin "nil"))
- [(RangeStmt _ _ _ x _)]
- nil)`)
-
-func CheckNilCheckAroundRange(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkNilCheckAroundRangeQ, node)
- if !ok {
- return
- }
- switch m.State["x"].(types.Object).Type().Underlying().(type) {
- case *types.Slice, *types.Map:
- report.Report(pass, node, "unnecessary nil check around range",
- report.ShortRange(),
- report.FilterGenerated())
-
- }
- }
- code.Preorder(pass, fn, (*ast.IfStmt)(nil))
- return nil, nil
-}
-
-func isPermissibleSort(pass *analysis.Pass, node ast.Node) bool {
- call := node.(*ast.CallExpr)
- typeconv, ok := call.Args[0].(*ast.CallExpr)
- if !ok {
- return true
- }
-
- sel, ok := typeconv.Fun.(*ast.SelectorExpr)
- if !ok {
- return true
- }
- name := code.SelectorName(pass, sel)
- switch name {
- case "sort.IntSlice", "sort.Float64Slice", "sort.StringSlice":
- default:
- return true
- }
-
- return false
-}
-
-func CheckSortHelpers(pass *analysis.Pass) (interface{}, error) {
- type Error struct {
- node ast.Node
- msg string
- }
- var allErrors []Error
- fn := func(node ast.Node) {
- var body *ast.BlockStmt
- switch node := node.(type) {
- case *ast.FuncLit:
- body = node.Body
- case *ast.FuncDecl:
- body = node.Body
- default:
- ExhaustiveTypeSwitch(node)
- }
- if body == nil {
- return
- }
-
- var errors []Error
- permissible := false
- fnSorts := func(node ast.Node) bool {
- if permissible {
- return false
- }
- if !code.IsCallToAST(pass, node, "sort.Sort") {
- return true
- }
- if isPermissibleSort(pass, node) {
- permissible = true
- return false
- }
- call := node.(*ast.CallExpr)
- typeconv := call.Args[Arg("sort.Sort.data")].(*ast.CallExpr)
- sel := typeconv.Fun.(*ast.SelectorExpr)
- name := code.SelectorName(pass, sel)
-
- switch name {
- case "sort.IntSlice":
- errors = append(errors, Error{node, "should use sort.Ints(...) instead of sort.Sort(sort.IntSlice(...))"})
- case "sort.Float64Slice":
- errors = append(errors, Error{node, "should use sort.Float64s(...) instead of sort.Sort(sort.Float64Slice(...))"})
- case "sort.StringSlice":
- errors = append(errors, Error{node, "should use sort.Strings(...) instead of sort.Sort(sort.StringSlice(...))"})
- }
- return true
- }
- ast.Inspect(body, fnSorts)
-
- if permissible {
- return
- }
- allErrors = append(allErrors, errors...)
- }
- code.Preorder(pass, fn, (*ast.FuncLit)(nil), (*ast.FuncDecl)(nil))
- sort.Slice(allErrors, func(i, j int) bool {
- return allErrors[i].node.Pos() < allErrors[j].node.Pos()
- })
- var prev token.Pos
- for _, err := range allErrors {
- if err.node.Pos() == prev {
- continue
- }
- prev = err.node.Pos()
- report.Report(pass, err.node, err.msg, report.FilterGenerated())
- }
- return nil, nil
-}
-
-var checkGuardedDeleteQ = pattern.MustParse(`
- (IfStmt
- (AssignStmt
- [(Ident "_") ok@(Ident _)]
- ":="
- (IndexExpr m key))
- ok
- [call@(CallExpr (Builtin "delete") [m key])]
- nil)`)
-
-func CheckGuardedDelete(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if m, ok := Match(pass, checkGuardedDeleteQ, node); ok {
- report.Report(pass, node, "unnecessary guard around call to delete",
- report.ShortRange(),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("remove guard", edit.ReplaceWithNode(pass.Fset, node, m.State["call"].(ast.Node)))))
- }
- }
-
- code.Preorder(pass, fn, (*ast.IfStmt)(nil))
- return nil, nil
-}
-
-var (
- checkSimplifyTypeSwitchQ = pattern.MustParse(`
- (TypeSwitchStmt
- nil
- expr@(TypeAssertExpr ident@(Ident _) _)
- body)`)
- checkSimplifyTypeSwitchR = pattern.MustParse(`(AssignStmt ident ":=" expr)`)
-)
-
-func CheckSimplifyTypeSwitch(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkSimplifyTypeSwitchQ, node)
- if !ok {
- return
- }
- stmt := node.(*ast.TypeSwitchStmt)
- expr := m.State["expr"].(ast.Node)
- ident := m.State["ident"].(*ast.Ident)
-
- x := pass.TypesInfo.ObjectOf(ident)
- var allOffenders []*ast.TypeAssertExpr
- canSuggestFix := true
- for _, clause := range stmt.Body.List {
- clause := clause.(*ast.CaseClause)
- if len(clause.List) != 1 {
- continue
- }
- hasUnrelatedAssertion := false
- var offenders []*ast.TypeAssertExpr
- ast.Inspect(clause, func(node ast.Node) bool {
- assert2, ok := node.(*ast.TypeAssertExpr)
- if !ok {
- return true
- }
- ident, ok := assert2.X.(*ast.Ident)
- if !ok {
- hasUnrelatedAssertion = true
- return false
- }
- if pass.TypesInfo.ObjectOf(ident) != x {
- hasUnrelatedAssertion = true
- return false
- }
-
- if !types.Identical(pass.TypesInfo.TypeOf(clause.List[0]), pass.TypesInfo.TypeOf(assert2.Type)) {
- hasUnrelatedAssertion = true
- return false
- }
- offenders = append(offenders, assert2)
- return true
- })
- if !hasUnrelatedAssertion {
- // don't flag cases that have other type assertions
- // unrelated to the one in the case clause. often
- // times, this is done for symmetry, when two
- // different values have to be asserted to the same
- // type.
- allOffenders = append(allOffenders, offenders...)
- }
- canSuggestFix = canSuggestFix && !hasUnrelatedAssertion
- }
- if len(allOffenders) != 0 {
- var opts []report.Option
- for _, offender := range allOffenders {
- opts = append(opts, report.Related(offender, "could eliminate this type assertion"))
- }
- opts = append(opts, report.FilterGenerated())
-
- msg := fmt.Sprintf("assigning the result of this type assertion to a variable (switch %s := %s.(type)) could eliminate type assertions in switch cases",
- report.Render(pass, ident), report.Render(pass, ident))
- if canSuggestFix {
- var edits []analysis.TextEdit
- edits = append(edits, edit.ReplaceWithPattern(pass, checkSimplifyTypeSwitchR, m.State, expr))
- for _, offender := range allOffenders {
- edits = append(edits, edit.ReplaceWithNode(pass.Fset, offender, offender.X))
- }
- opts = append(opts, report.Fixes(edit.Fix("simplify type switch", edits...)))
- report.Report(pass, expr, msg, opts...)
- } else {
- report.Report(pass, expr, msg, opts...)
- }
- }
- }
- code.Preorder(pass, fn, (*ast.TypeSwitchStmt)(nil))
- return nil, nil
-}
-
-func CheckRedundantCanonicalHeaderKey(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- call := node.(*ast.CallExpr)
- callName := code.CallNameAST(pass, call)
- switch callName {
- case "(net/http.Header).Add", "(net/http.Header).Del", "(net/http.Header).Get", "(net/http.Header).Set":
- default:
- return
- }
-
- if !code.IsCallToAST(pass, call.Args[0], "net/http.CanonicalHeaderKey") {
- return
- }
-
- report.Report(pass, call,
- fmt.Sprintf("calling net/http.CanonicalHeaderKey on the 'key' argument of %s is redundant", callName),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("remove call to CanonicalHeaderKey", edit.ReplaceWithNode(pass.Fset, call.Args[0], call.Args[0].(*ast.CallExpr).Args[0]))))
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var checkUnnecessaryGuardQ = pattern.MustParse(`
- (Or
- (IfStmt
- (AssignStmt [(Ident "_") ok@(Ident _)] ":=" indexexpr@(IndexExpr _ _))
- ok
- set@(AssignStmt indexexpr "=" (CallExpr (Builtin "append") indexexpr:values))
- (AssignStmt indexexpr "=" (CompositeLit _ values)))
- (IfStmt
- (AssignStmt [(Ident "_") ok] ":=" indexexpr@(IndexExpr _ _))
- ok
- set@(AssignStmt indexexpr "+=" value)
- (AssignStmt indexexpr "=" value))
- (IfStmt
- (AssignStmt [(Ident "_") ok] ":=" indexexpr@(IndexExpr _ _))
- ok
- set@(IncDecStmt indexexpr "++")
- (AssignStmt indexexpr "=" (BasicLit "INT" "1"))))`)
-
-func CheckUnnecessaryGuard(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if m, ok := Match(pass, checkUnnecessaryGuardQ, node); ok {
- if code.MayHaveSideEffects(pass, m.State["indexexpr"].(ast.Expr), nil) {
- return
- }
- report.Report(pass, node, "unnecessary guard around map access",
- report.ShortRange(),
- report.Fixes(edit.Fix("simplify map access", edit.ReplaceWithNode(pass.Fset, node, m.State["set"].(ast.Node)))))
- }
- }
- code.Preorder(pass, fn, (*ast.IfStmt)(nil))
- return nil, nil
-}
-
-var (
- checkElaborateSleepQ = pattern.MustParse(`(SelectStmt (CommClause (UnaryExpr "<-" (CallExpr (Function "time.After") [arg])) body))`)
- checkElaborateSleepR = pattern.MustParse(`(CallExpr (SelectorExpr (Ident "time") (Ident "Sleep")) [arg])`)
-)
-
-func CheckElaborateSleep(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if m, ok := Match(pass, checkElaborateSleepQ, node); ok {
- if body, ok := m.State["body"].([]ast.Stmt); ok && len(body) == 0 {
- report.Report(pass, node, "should use time.Sleep instead of elaborate way of sleeping",
- report.ShortRange(),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("Use time.Sleep", edit.ReplaceWithPattern(pass, checkElaborateSleepR, m.State, node))))
- } else {
- // TODO(dh): we could make a suggested fix if the body
- // doesn't declare or shadow any identifiers
- report.Report(pass, node, "should use time.Sleep instead of elaborate way of sleeping",
- report.ShortRange(),
- report.FilterGenerated())
- }
- }
- }
- code.Preorder(pass, fn, (*ast.SelectStmt)(nil))
- return nil, nil
-}
-
-var checkPrintSprintQ = pattern.MustParse(`
- (Or
- (CallExpr
- fn@(Or
- (Function "fmt.Print")
- (Function "fmt.Sprint")
- (Function "fmt.Println")
- (Function "fmt.Sprintln"))
- [(CallExpr (Function "fmt.Sprintf") f:_)])
- (CallExpr
- fn@(Or
- (Function "fmt.Fprint")
- (Function "fmt.Fprintln"))
- [_ (CallExpr (Function "fmt.Sprintf") f:_)]))`)
-
-func CheckPrintSprintf(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkPrintSprintQ, node)
- if !ok {
- return
- }
-
- name := m.State["fn"].(*types.Func).Name()
- var msg string
- switch name {
- case "Print", "Fprint", "Sprint":
- newname := name + "f"
- msg = fmt.Sprintf("should use fmt.%s instead of fmt.%s(fmt.Sprintf(...))", newname, name)
- case "Println", "Fprintln", "Sprintln":
- if _, ok := m.State["f"].(*ast.BasicLit); !ok {
- // This may be an instance of
- // fmt.Println(fmt.Sprintf(arg, ...)) where arg is an
- // externally provided format string and the caller
- // cannot guarantee that the format string ends with a
- // newline.
- return
- }
- newname := name[:len(name)-2] + "f"
- msg = fmt.Sprintf("should use fmt.%s instead of fmt.%s(fmt.Sprintf(...)) (but don't forget the newline)", newname, name)
- }
- report.Report(pass, node, msg,
- report.FilterGenerated())
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var checkSprintLiteralQ = pattern.MustParse(`
- (CallExpr
- fn@(Or
- (Function "fmt.Sprint")
- (Function "fmt.Sprintf"))
- [lit@(BasicLit "STRING" _)])`)
-
-func CheckSprintLiteral(pass *analysis.Pass) (interface{}, error) {
- // We only flag calls with string literals, not expressions of
- // type string, because some people use fmt.Sprint(s) as a pattern
- // for copying strings, which may be useful when extracing a small
- // substring from a large string.
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkSprintLiteralQ, node)
- if !ok {
- return
- }
- callee := m.State["fn"].(*types.Func)
- lit := m.State["lit"].(*ast.BasicLit)
- if callee.Name() == "Sprintf" {
- if strings.ContainsRune(lit.Value, '%') {
- // This might be a format string
- return
- }
- }
- report.Report(pass, node, fmt.Sprintf("unnecessary use of fmt.%s", callee.Name()),
- report.FilterGenerated(),
- report.Fixes(edit.Fix("Replace with string literal", edit.ReplaceWithNode(pass.Fset, node, lit))))
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
diff --git a/vendor/honnef.co/go/tools/staticcheck/analysis.go b/vendor/honnef.co/go/tools/staticcheck/analysis.go
deleted file mode 100644
index 6590312d2f1..00000000000
--- a/vendor/honnef.co/go/tools/staticcheck/analysis.go
+++ /dev/null
@@ -1,267 +0,0 @@
-package staticcheck
-
-import (
- "honnef.co/go/tools/facts"
- "honnef.co/go/tools/internal/passes/buildir"
- "honnef.co/go/tools/lint/lintutil"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
-)
-
-func makeCallCheckerAnalyzer(rules map[string]CallCheck, extraReqs ...*analysis.Analyzer) *analysis.Analyzer {
- reqs := []*analysis.Analyzer{buildir.Analyzer, facts.TokenFile}
- reqs = append(reqs, extraReqs...)
- return &analysis.Analyzer{
- Run: callChecker(rules),
- Requires: reqs,
- }
-}
-
-var Analyzers = lintutil.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer{
- "SA1000": makeCallCheckerAnalyzer(checkRegexpRules),
- "SA1001": {
- Run: CheckTemplate,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA1002": makeCallCheckerAnalyzer(checkTimeParseRules),
- "SA1003": makeCallCheckerAnalyzer(checkEncodingBinaryRules),
- "SA1004": {
- Run: CheckTimeSleepConstant,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA1005": {
- Run: CheckExec,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA1006": {
- Run: CheckUnsafePrintf,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA1007": makeCallCheckerAnalyzer(checkURLsRules),
- "SA1008": {
- Run: CheckCanonicalHeaderKey,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA1010": makeCallCheckerAnalyzer(checkRegexpFindAllRules),
- "SA1011": makeCallCheckerAnalyzer(checkUTF8CutsetRules),
- "SA1012": {
- Run: CheckNilContext,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA1013": {
- Run: CheckSeeker,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA1014": makeCallCheckerAnalyzer(checkUnmarshalPointerRules),
- "SA1015": {
- Run: CheckLeakyTimeTick,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA1016": {
- Run: CheckUntrappableSignal,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA1017": makeCallCheckerAnalyzer(checkUnbufferedSignalChanRules),
- "SA1018": makeCallCheckerAnalyzer(checkStringsReplaceZeroRules),
- "SA1019": {
- Run: CheckDeprecated,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Deprecated, facts.Generated},
- },
- "SA1020": makeCallCheckerAnalyzer(checkListenAddressRules),
- "SA1021": makeCallCheckerAnalyzer(checkBytesEqualIPRules),
- "SA1023": {
- Run: CheckWriterBufferModified,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA1024": makeCallCheckerAnalyzer(checkUniqueCutsetRules),
- "SA1025": {
- Run: CheckTimerResetReturnValue,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA1026": makeCallCheckerAnalyzer(checkUnsupportedMarshal),
- "SA1027": makeCallCheckerAnalyzer(checkAtomicAlignment),
- "SA1028": makeCallCheckerAnalyzer(checkSortSliceRules),
- "SA1029": makeCallCheckerAnalyzer(checkWithValueKeyRules),
-
- "SA2000": {
- Run: CheckWaitgroupAdd,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA2001": {
- Run: CheckEmptyCriticalSection,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA2002": {
- Run: CheckConcurrentTesting,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA2003": {
- Run: CheckDeferLock,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
-
- "SA3000": {
- Run: CheckTestMainExit,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA3001": {
- Run: CheckBenchmarkN,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
-
- "SA4000": {
- Run: CheckLhsRhsIdentical,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.TokenFile, facts.Generated},
- },
- "SA4001": {
- Run: CheckIneffectiveCopy,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA4003": {
- Run: CheckExtremeComparison,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA4004": {
- Run: CheckIneffectiveLoop,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA4006": {
- Run: CheckUnreadVariableValues,
- Requires: []*analysis.Analyzer{buildir.Analyzer, facts.Generated},
- },
- "SA4008": {
- Run: CheckLoopCondition,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA4009": {
- Run: CheckArgOverwritten,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA4010": {
- Run: CheckIneffectiveAppend,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA4011": {
- Run: CheckScopedBreak,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA4012": {
- Run: CheckNaNComparison,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA4013": {
- Run: CheckDoubleNegation,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA4014": {
- Run: CheckRepeatedIfElse,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA4015": makeCallCheckerAnalyzer(checkMathIntRules),
- "SA4016": {
- Run: CheckSillyBitwiseOps,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.TokenFile},
- },
- "SA4017": {
- Run: CheckPureFunctions,
- Requires: []*analysis.Analyzer{buildir.Analyzer, facts.Purity},
- },
- "SA4018": {
- Run: CheckSelfAssignment,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile, facts.Purity},
- },
- "SA4019": {
- Run: CheckDuplicateBuildConstraints,
- Requires: []*analysis.Analyzer{facts.Generated},
- },
- "SA4020": {
- Run: CheckUnreachableTypeCases,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA4021": {
- Run: CheckSingleArgAppend,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile},
- },
-
- "SA5000": {
- Run: CheckNilMaps,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA5001": {
- Run: CheckEarlyDefer,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA5002": {
- Run: CheckInfiniteEmptyLoop,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA5003": {
- Run: CheckDeferInInfiniteLoop,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA5004": {
- Run: CheckLoopEmptyDefault,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA5005": {
- Run: CheckCyclicFinalizer,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA5007": {
- Run: CheckInfiniteRecursion,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA5008": {
- Run: CheckStructTags,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA5009": makeCallCheckerAnalyzer(checkPrintfRules),
- "SA5010": {
- Run: CheckImpossibleTypeAssertion,
- Requires: []*analysis.Analyzer{buildir.Analyzer, facts.TokenFile},
- },
- "SA5011": {
- Run: CheckMaybeNil,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
-
- "SA6000": makeCallCheckerAnalyzer(checkRegexpMatchLoopRules),
- "SA6001": {
- Run: CheckMapBytesKey,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA6002": makeCallCheckerAnalyzer(checkSyncPoolValueRules),
- "SA6003": {
- Run: CheckRangeStringRunes,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "SA6005": {
- Run: CheckToLowerToUpperComparison,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
-
- "SA9001": {
- Run: CheckDubiousDeferInChannelRangeLoop,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA9002": {
- Run: CheckNonOctalFileMode,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "SA9003": {
- Run: CheckEmptyBranch,
- Requires: []*analysis.Analyzer{buildir.Analyzer, facts.TokenFile, facts.Generated},
- },
- "SA9004": {
- Run: CheckMissingEnumTypesInDeclaration,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- // Filtering generated code because it may include empty structs generated from data models.
- "SA9005": makeCallCheckerAnalyzer(checkNoopMarshal, facts.Generated),
-
- "SA4022": {
- Run: CheckAddressIsNil,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
-})
diff --git a/vendor/honnef.co/go/tools/staticcheck/buildtag.go b/vendor/honnef.co/go/tools/staticcheck/buildtag.go
deleted file mode 100644
index 58e1e4ae1d1..00000000000
--- a/vendor/honnef.co/go/tools/staticcheck/buildtag.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package staticcheck
-
-import (
- "go/ast"
- "strings"
-
- "honnef.co/go/tools/code"
-)
-
-func buildTags(f *ast.File) [][]string {
- var out [][]string
- for _, line := range strings.Split(code.Preamble(f), "\n") {
- if !strings.HasPrefix(line, "+build ") {
- continue
- }
- line = strings.TrimSpace(strings.TrimPrefix(line, "+build "))
- fields := strings.Fields(line)
- out = append(out, fields)
- }
- return out
-}
diff --git a/vendor/honnef.co/go/tools/staticcheck/doc.go b/vendor/honnef.co/go/tools/staticcheck/doc.go
deleted file mode 100644
index 17d28ad600a..00000000000
--- a/vendor/honnef.co/go/tools/staticcheck/doc.go
+++ /dev/null
@@ -1,880 +0,0 @@
-package staticcheck
-
-import "honnef.co/go/tools/lint"
-
-var Docs = map[string]*lint.Documentation{
- "SA1000": {
- Title: `Invalid regular expression`,
- Since: "2017.1",
- },
-
- "SA1001": {
- Title: `Invalid template`,
- Since: "2017.1",
- },
-
- "SA1002": {
- Title: `Invalid format in time.Parse`,
- Since: "2017.1",
- },
-
- "SA1003": {
- Title: `Unsupported argument to functions in encoding/binary`,
- Text: `The encoding/binary package can only serialize types with known sizes.
-This precludes the use of the int and uint types, as their sizes
-differ on different architectures. Furthermore, it doesn't support
-serializing maps, channels, strings, or functions.
-
-Before Go 1.8, bool wasn't supported, either.`,
- Since: "2017.1",
- },
-
- "SA1004": {
- Title: `Suspiciously small untyped constant in time.Sleep`,
- Text: `The time.Sleep function takes a time.Duration as its only argument.
-Durations are expressed in nanoseconds. Thus, calling time.Sleep(1)
-will sleep for 1 nanosecond. This is a common source of bugs, as sleep
-functions in other languages often accept seconds or milliseconds.
-
-The time package provides constants such as time.Second to express
-large durations. These can be combined with arithmetic to express
-arbitrary durations, for example '5 * time.Second' for 5 seconds.
-
-If you truly meant to sleep for a tiny amount of time, use
-'n * time.Nanosecond' to signal to Staticcheck that you did mean to sleep
-for some amount of nanoseconds.`,
- Since: "2017.1",
- },
-
- "SA1005": {
- Title: `Invalid first argument to exec.Command`,
- Text: `os/exec runs programs directly (using variants of the fork and exec
-system calls on Unix systems). This shouldn't be confused with running
-a command in a shell. The shell will allow for features such as input
-redirection, pipes, and general scripting. The shell is also
-responsible for splitting the user's input into a program name and its
-arguments. For example, the equivalent to
-
- ls / /tmp
-
-would be
-
- exec.Command("ls", "/", "/tmp")
-
-If you want to run a command in a shell, consider using something like
-the following – but be aware that not all systems, particularly
-Windows, will have a /bin/sh program:
-
- exec.Command("/bin/sh", "-c", "ls | grep Awesome")`,
- Since: "2017.1",
- },
-
- "SA1006": {
- Title: `Printf with dynamic first argument and no further arguments`,
- Text: `Using fmt.Printf with a dynamic first argument can lead to unexpected
-output. The first argument is a format string, where certain character
-combinations have special meaning. If, for example, a user were to
-enter a string such as
-
- Interest rate: 5%
-
-and you printed it with
-
- fmt.Printf(s)
-
-it would lead to the following output:
-
- Interest rate: 5%!(NOVERB).
-
-Similarly, forming the first parameter via string concatenation with
-user input should be avoided for the same reason. When printing user
-input, either use a variant of fmt.Print, or use the %s Printf verb
-and pass the string as an argument.`,
- Since: "2017.1",
- },
-
- "SA1007": {
- Title: `Invalid URL in net/url.Parse`,
- Since: "2017.1",
- },
-
- "SA1008": {
- Title: `Non-canonical key in http.Header map`,
- Text: `Keys in http.Header maps are canonical, meaning they follow a specific
-combination of uppercase and lowercase letters. Methods such as
-http.Header.Add and http.Header.Del convert inputs into this canonical
-form before manipulating the map.
-
-When manipulating http.Header maps directly, as opposed to using the
-provided methods, care should be taken to stick to canonical form in
-order to avoid inconsistencies. The following piece of code
-demonstrates one such inconsistency:
-
- h := http.Header{}
- h["etag"] = []string{"1234"}
- h.Add("etag", "5678")
- fmt.Println(h)
-
- // Output:
- // map[Etag:[5678] etag:[1234]]
-
-The easiest way of obtaining the canonical form of a key is to use
-http.CanonicalHeaderKey.`,
- Since: "2017.1",
- },
-
- "SA1010": {
- Title: `(*regexp.Regexp).FindAll called with n == 0, which will always return zero results`,
- Text: `If n >= 0, the function returns at most n matches/submatches. To
-return all results, specify a negative number.`,
- Since: "2017.1",
- },
-
- "SA1011": {
- Title: `Various methods in the strings package expect valid UTF-8, but invalid input is provided`,
- Since: "2017.1",
- },
-
- "SA1012": {
- Title: `A nil context.Context is being passed to a function, consider using context.TODO instead`,
- Since: "2017.1",
- },
-
- "SA1013": {
- Title: `io.Seeker.Seek is being called with the whence constant as the first argument, but it should be the second`,
- Since: "2017.1",
- },
-
- "SA1014": {
- Title: `Non-pointer value passed to Unmarshal or Decode`,
- Since: "2017.1",
- },
-
- "SA1015": {
- Title: `Using time.Tick in a way that will leak. Consider using time.NewTicker, and only use time.Tick in tests, commands and endless functions`,
- Since: "2017.1",
- },
-
- "SA1016": {
- Title: `Trapping a signal that cannot be trapped`,
- Text: `Not all signals can be intercepted by a process. Speficially, on
-UNIX-like systems, the syscall.SIGKILL and syscall.SIGSTOP signals are
-never passed to the process, but instead handled directly by the
-kernel. It is therefore pointless to try and handle these signals.`,
- Since: "2017.1",
- },
-
- "SA1017": {
- Title: `Channels used with os/signal.Notify should be buffered`,
- Text: `The os/signal package uses non-blocking channel sends when delivering
-signals. If the receiving end of the channel isn't ready and the
-channel is either unbuffered or full, the signal will be dropped. To
-avoid missing signals, the channel should be buffered and of the
-appropriate size. For a channel used for notification of just one
-signal value, a buffer of size 1 is sufficient.`,
- Since: "2017.1",
- },
-
- "SA1018": {
- Title: `strings.Replace called with n == 0, which does nothing`,
- Text: `With n == 0, zero instances will be replaced. To replace all
-instances, use a negative number, or use strings.ReplaceAll.`,
- Since: "2017.1",
- },
-
- "SA1019": {
- Title: `Using a deprecated function, variable, constant or field`,
- Since: "2017.1",
- },
-
- "SA1020": {
- Title: `Using an invalid host:port pair with a net.Listen-related function`,
- Since: "2017.1",
- },
-
- "SA1021": {
- Title: `Using bytes.Equal to compare two net.IP`,
- Text: `A net.IP stores an IPv4 or IPv6 address as a slice of bytes. The
-length of the slice for an IPv4 address, however, can be either 4 or
-16 bytes long, using different ways of representing IPv4 addresses. In
-order to correctly compare two net.IPs, the net.IP.Equal method should
-be used, as it takes both representations into account.`,
- Since: "2017.1",
- },
-
- "SA1023": {
- Title: `Modifying the buffer in an io.Writer implementation`,
- Text: `Write must not modify the slice data, even temporarily.`,
- Since: "2017.1",
- },
-
- "SA1024": {
- Title: `A string cutset contains duplicate characters`,
- Text: `The strings.TrimLeft and strings.TrimRight functions take cutsets, not
-prefixes. A cutset is treated as a set of characters to remove from a
-string. For example,
-
- strings.TrimLeft("42133word", "1234"))
-
-will result in the string "word" – any characters that are 1, 2, 3 or
-4 are cut from the left of the string.
-
-In order to remove one string from another, use strings.TrimPrefix instead.`,
- Since: "2017.1",
- },
-
- "SA1025": {
- Title: `It is not possible to use (*time.Timer).Reset's return value correctly`,
- Since: "2019.1",
- },
-
- "SA1026": {
- Title: `Cannot marshal channels or functions`,
- Since: "2019.2",
- },
-
- "SA1027": {
- Title: `Atomic access to 64-bit variable must be 64-bit aligned`,
- Text: `On ARM, x86-32, and 32-bit MIPS, it is the caller's responsibility to
-arrange for 64-bit alignment of 64-bit words accessed atomically. The
-first word in a variable or in an allocated struct, array, or slice
-can be relied upon to be 64-bit aligned.
-
-You can use the structlayout tool to inspect the alignment of fields
-in a struct.`,
- Since: "2019.2",
- },
-
- "SA1028": {
- Title: `sort.Slice can only be used on slices`,
- Text: `The first argument of sort.Slice must be a slice.`,
- Since: "2020.1",
- },
-
- "SA1029": {
- Title: `Inappropriate key in call to context.WithValue`,
- Text: `The provided key must be comparable and should not be
-of type string or any other built-in type to avoid collisions between
-packages using context. Users of WithValue should define their own
-types for keys.
-
-To avoid allocating when assigning to an interface{},
-context keys often have concrete type struct{}. Alternatively,
-exported context key variables' static type should be a pointer or
-interface.`,
- Since: "2020.1",
- },
-
- "SA2000": {
- Title: `sync.WaitGroup.Add called inside the goroutine, leading to a race condition`,
- Since: "2017.1",
- },
-
- "SA2001": {
- Title: `Empty critical section, did you mean to defer the unlock?`,
- Text: `Empty critical sections of the kind
-
- mu.Lock()
- mu.Unlock()
-
-are very often a typo, and the following was intended instead:
-
- mu.Lock()
- defer mu.Unlock()
-
-Do note that sometimes empty critical sections can be useful, as a
-form of signaling to wait on another goroutine. Many times, there are
-simpler ways of achieving the same effect. When that isn't the case,
-the code should be amply commented to avoid confusion. Combining such
-comments with a //lint:ignore directive can be used to suppress this
-rare false positive.`,
- Since: "2017.1",
- },
-
- "SA2002": {
- Title: `Called testing.T.FailNow or SkipNow in a goroutine, which isn't allowed`,
- Since: "2017.1",
- },
-
- "SA2003": {
- Title: `Deferred Lock right after locking, likely meant to defer Unlock instead`,
- Since: "2017.1",
- },
-
- "SA3000": {
- Title: `TestMain doesn't call os.Exit, hiding test failures`,
- Text: `Test executables (and in turn 'go test') exit with a non-zero status
-code if any tests failed. When specifying your own TestMain function,
-it is your responsibility to arrange for this, by calling os.Exit with
-the correct code. The correct code is returned by (*testing.M).Run, so
-the usual way of implementing TestMain is to end it with
-os.Exit(m.Run()).`,
- Since: "2017.1",
- },
-
- "SA3001": {
- Title: `Assigning to b.N in benchmarks distorts the results`,
- Text: `The testing package dynamically sets b.N to improve the reliability of
-benchmarks and uses it in computations to determine the duration of a
-single operation. Benchmark code must not alter b.N as this would
-falsify results.`,
- Since: "2017.1",
- },
-
- "SA4000": {
- Title: `Boolean expression has identical expressions on both sides`,
- Since: "2017.1",
- },
-
- "SA4001": {
- Title: `&*x gets simplified to x, it does not copy x`,
- Since: "2017.1",
- },
-
- "SA4002": {
- Title: `Comparing strings with known different sizes has predictable results`,
- Since: "2017.1",
- },
-
- "SA4003": {
- Title: `Comparing unsigned values against negative values is pointless`,
- Since: "2017.1",
- },
-
- "SA4004": {
- Title: `The loop exits unconditionally after one iteration`,
- Since: "2017.1",
- },
-
- "SA4005": {
- Title: `Field assignment that will never be observed. Did you mean to use a pointer receiver?`,
- Since: "2017.1",
- },
-
- "SA4006": {
- Title: `A value assigned to a variable is never read before being overwritten. Forgotten error check or dead code?`,
- Since: "2017.1",
- },
-
- "SA4008": {
- Title: `The variable in the loop condition never changes, are you incrementing the wrong variable?`,
- Since: "2017.1",
- },
-
- "SA4009": {
- Title: `A function argument is overwritten before its first use`,
- Since: "2017.1",
- },
-
- "SA4010": {
- Title: `The result of append will never be observed anywhere`,
- Since: "2017.1",
- },
-
- "SA4011": {
- Title: `Break statement with no effect. Did you mean to break out of an outer loop?`,
- Since: "2017.1",
- },
-
- "SA4012": {
- Title: `Comparing a value against NaN even though no value is equal to NaN`,
- Since: "2017.1",
- },
-
- "SA4013": {
- Title: `Negating a boolean twice (!!b) is the same as writing b. This is either redundant, or a typo.`,
- Since: "2017.1",
- },
-
- "SA4014": {
- Title: `An if/else if chain has repeated conditions and no side-effects; if the condition didn't match the first time, it won't match the second time, either`,
- Since: "2017.1",
- },
-
- "SA4015": {
- Title: `Calling functions like math.Ceil on floats converted from integers doesn't do anything useful`,
- Since: "2017.1",
- },
-
- "SA4016": {
- Title: `Certain bitwise operations, such as x ^ 0, do not do anything useful`,
- Since: "2017.1",
- },
-
- "SA4017": {
- Title: `A pure function's return value is discarded, making the call pointless`,
- Since: "2017.1",
- },
-
- "SA4018": {
- Title: `Self-assignment of variables`,
- Since: "2017.1",
- },
-
- "SA4019": {
- Title: `Multiple, identical build constraints in the same file`,
- Since: "2017.1",
- },
-
- "SA4020": {
- Title: `Unreachable case clause in a type switch`,
- Text: `In a type switch like the following
-
- type T struct{}
- func (T) Read(b []byte) (int, error) { return 0, nil }
-
- var v interface{} = T{}
-
- switch v.(type) {
- case io.Reader:
- // ...
- case T:
- // unreachable
- }
-
-the second case clause can never be reached because T implements
-io.Reader and case clauses are evaluated in source order.
-
-Another example:
-
- type T struct{}
- func (T) Read(b []byte) (int, error) { return 0, nil }
- func (T) Close() error { return nil }
-
- var v interface{} = T{}
-
- switch v.(type) {
- case io.Reader:
- // ...
- case io.ReadCloser:
- // unreachable
- }
-
-Even though T has a Close method and thus implements io.ReadCloser,
-io.Reader will always match first. The method set of io.Reader is a
-subset of io.ReadCloser. Thus it is impossible to match the second
-case without matching the first case.
-
-
-Structurally equivalent interfaces
-
-A special case of the previous example are structurally identical
-interfaces. Given these declarations
-
- type T error
- type V error
-
- func doSomething() error {
- err, ok := doAnotherThing()
- if ok {
- return T(err)
- }
-
- return U(err)
- }
-
-the following type switch will have an unreachable case clause:
-
- switch doSomething().(type) {
- case T:
- // ...
- case V:
- // unreachable
- }
-
-T will always match before V because they are structurally equivalent
-and therefore doSomething()'s return value implements both.`,
- Since: "2019.2",
- },
-
- "SA4021": {
- Title: `x = append(y) is equivalent to x = y`,
- Since: "2019.2",
- },
-
- "SA4022": {
- Title: `Comparing the address of a variable against nil`,
- Text: `Code such as 'if &x == nil' is meaningless, because taking the address of a variable always yields a non-nil pointer.`,
- Since: "2020.1",
- },
-
- "SA5000": {
- Title: `Assignment to nil map`,
- Since: "2017.1",
- },
-
- "SA5001": {
- Title: `Defering Close before checking for a possible error`,
- Since: "2017.1",
- },
-
- "SA5002": {
- Title: `The empty for loop (for {}) spins and can block the scheduler`,
- Since: "2017.1",
- },
-
- "SA5003": {
- Title: `Defers in infinite loops will never execute`,
- Text: `Defers are scoped to the surrounding function, not the surrounding
-block. In a function that never returns, i.e. one containing an
-infinite loop, defers will never execute.`,
- Since: "2017.1",
- },
-
- "SA5004": {
- Title: `for { select { ... with an empty default branch spins`,
- Since: "2017.1",
- },
-
- "SA5005": {
- Title: `The finalizer references the finalized object, preventing garbage collection`,
- Text: `A finalizer is a function associated with an object that runs when the
-garbage collector is ready to collect said object, that is when the
-object is no longer referenced by anything.
-
-If the finalizer references the object, however, it will always remain
-as the final reference to that object, preventing the garbage
-collector from collecting the object. The finalizer will never run,
-and the object will never be collected, leading to a memory leak. That
-is why the finalizer should instead use its first argument to operate
-on the object. That way, the number of references can temporarily go
-to zero before the object is being passed to the finalizer.`,
- Since: "2017.1",
- },
-
- "SA5006": {
- Title: `Slice index out of bounds`,
- Since: "2017.1",
- },
-
- "SA5007": {
- Title: `Infinite recursive call`,
- Text: `A function that calls itself recursively needs to have an exit
-condition. Otherwise it will recurse forever, until the system runs
-out of memory.
-
-This issue can be caused by simple bugs such as forgetting to add an
-exit condition. It can also happen "on purpose". Some languages have
-tail call optimization which makes certain infinite recursive calls
-safe to use. Go, however, does not implement TCO, and as such a loop
-should be used instead.`,
- Since: "2017.1",
- },
-
- "SA5008": {
- Title: `Invalid struct tag`,
- Since: "2019.2",
- },
-
- "SA5009": {
- Title: `Invalid Printf call`,
- Since: "2019.2",
- },
-
- "SA5010": {
- Title: `Impossible type assertion`,
-
- Text: `Some type assertions can be statically proven to be
-impossible. This is the case when the method sets of both
-arguments of the type assertion conflict with each other, for
-example by containing the same method with different
-signatures.
-
-The Go compiler already applies this check when asserting from an
-interface value to a concrete type. If the concrete type misses
-methods from the interface, or if function signatures don't match,
-then the type assertion can never succeed.
-
-This check applies the same logic when asserting from one interface to
-another. If both interface types contain the same method but with
-different signatures, then the type assertion can never succeed,
-either.`,
-
- Since: "2020.1",
- },
-
- "SA5011": {
- Title: `Possible nil pointer dereference`,
-
- Text: `A pointer is being dereferenced unconditionally, while
-also being checked against nil in another place. This suggests that
-the pointer may be nil and dereferencing it may panic. This is
-commonly a result of improperly ordered code or missing return
-statements. Consider the following examples:
-
- func fn(x *int) {
- fmt.Println(*x)
-
- // This nil check is equally important for the previous dereference
- if x != nil {
- foo(*x)
- }
- }
-
- func TestFoo(t *testing.T) {
- x := compute()
- if x == nil {
- t.Errorf("nil pointer received")
- }
-
- // t.Errorf does not abort the test, so if x is nil, the next line will panic.
- foo(*x)
- }
-
-Staticcheck tries to deduce which functions abort control flow.
-For example, it is aware that a function will not continue
-execution after a call to panic or log.Fatal. However, sometimes
-this detection fails, in particular in the presence of
-conditionals. Consider the following example:
-
- func Log(msg string, level int) {
- fmt.Println(msg)
- if level == levelFatal {
- os.Exit(1)
- }
- }
-
- func Fatal(msg string) {
- Log(msg, levelFatal)
- }
-
- func fn(x *int) {
- if x == nil {
- Fatal("unexpected nil pointer")
- }
- fmt.Println(*x)
- }
-
-Staticcheck will flag the dereference of x, even though it is perfectly
-safe. Staticcheck is not able to deduce that a call to
-Fatal will exit the program. For the time being, the easiest
-workaround is to modify the definition of Fatal like so:
-
- func Fatal(msg string) {
- Log(msg, levelFatal)
- panic("unreachable")
- }
-
-We also hard-code functions from common logging packages such as
-logrus. Please file an issue if we're missing support for a
-popular package.`,
- Since: "2020.1",
- },
-
- "SA6000": {
- Title: `Using regexp.Match or related in a loop, should use regexp.Compile`,
- Since: "2017.1",
- },
-
- "SA6001": {
- Title: `Missing an optimization opportunity when indexing maps by byte slices`,
-
- Text: `Map keys must be comparable, which precludes the use of byte slices.
-This usually leads to using string keys and converting byte slices to
-strings.
-
-Normally, a conversion of a byte slice to a string needs to copy the data and
-causes allocations. The compiler, however, recognizes m[string(b)] and
-uses the data of b directly, without copying it, because it knows that
-the data can't change during the map lookup. This leads to the
-counter-intuitive situation that
-
- k := string(b)
- println(m[k])
- println(m[k])
-
-will be less efficient than
-
- println(m[string(b)])
- println(m[string(b)])
-
-because the first version needs to copy and allocate, while the second
-one does not.
-
-For some history on this optimization, check out commit
-f5f5a8b6209f84961687d993b93ea0d397f5d5bf in the Go repository.`,
- Since: "2017.1",
- },
-
- "SA6002": {
- Title: `Storing non-pointer values in sync.Pool allocates memory`,
- Text: `A sync.Pool is used to avoid unnecessary allocations and reduce the
-amount of work the garbage collector has to do.
-
-When passing a value that is not a pointer to a function that accepts
-an interface, the value needs to be placed on the heap, which means an
-additional allocation. Slices are a common thing to put in sync.Pools,
-and they're structs with 3 fields (length, capacity, and a pointer to
-an array). In order to avoid the extra allocation, one should store a
-pointer to the slice instead.
-
-See the comments on https://go-review.googlesource.com/c/go/+/24371
-that discuss this problem.`,
- Since: "2017.1",
- },
-
- "SA6003": {
- Title: `Converting a string to a slice of runes before ranging over it`,
- Text: `You may want to loop over the runes in a string. Instead of converting
-the string to a slice of runes and looping over that, you can loop
-over the string itself. That is,
-
- for _, r := range s {}
-
-and
-
- for _, r := range []rune(s) {}
-
-will yield the same values. The first version, however, will be faster
-and avoid unnecessary memory allocations.
-
-Do note that if you are interested in the indices, ranging over a
-string and over a slice of runes will yield different indices. The
-first one yields byte offsets, while the second one yields indices in
-the slice of runes.`,
- Since: "2017.1",
- },
-
- "SA6005": {
- Title: `Inefficient string comparison with strings.ToLower or strings.ToUpper`,
- Text: `Converting two strings to the same case and comparing them like so
-
- if strings.ToLower(s1) == strings.ToLower(s2) {
- ...
- }
-
-is significantly more expensive than comparing them with
-strings.EqualFold(s1, s2). This is due to memory usage as well as
-computational complexity.
-
-strings.ToLower will have to allocate memory for the new strings, as
-well as convert both strings fully, even if they differ on the very
-first byte. strings.EqualFold, on the other hand, compares the strings
-one character at a time. It doesn't need to create two intermediate
-strings and can return as soon as the first non-matching character has
-been found.
-
-For a more in-depth explanation of this issue, see
-https://blog.digitalocean.com/how-to-efficiently-compare-strings-in-go/`,
- Since: "2019.2",
- },
-
- "SA9001": {
- Title: `Defers in range loops may not run when you expect them to`,
- Since: "2017.1",
- },
-
- "SA9002": {
- Title: `Using a non-octal os.FileMode that looks like it was meant to be in octal.`,
- Since: "2017.1",
- },
-
- "SA9003": {
- Title: `Empty body in an if or else branch`,
- Since: "2017.1",
- },
-
- "SA9004": {
- Title: `Only the first constant has an explicit type`,
-
- Text: `In a constant declaration such as the following:
-
- const (
- First byte = 1
- Second = 2
- )
-
-the constant Second does not have the same type as the constant First.
-This construct shouldn't be confused with
-
- const (
- First byte = iota
- Second
- )
-
-where First and Second do indeed have the same type. The type is only
-passed on when no explicit value is assigned to the constant.
-
-When declaring enumerations with explicit values it is therefore
-important not to write
-
- const (
- EnumFirst EnumType = 1
- EnumSecond = 2
- EnumThird = 3
- )
-
-This discrepancy in types can cause various confusing behaviors and
-bugs.
-
-
-Wrong type in variable declarations
-
-The most obvious issue with such incorrect enumerations expresses
-itself as a compile error:
-
- package pkg
-
- const (
- EnumFirst uint8 = 1
- EnumSecond = 2
- )
-
- func fn(useFirst bool) {
- x := EnumSecond
- if useFirst {
- x = EnumFirst
- }
- }
-
-fails to compile with
-
- ./const.go:11:5: cannot use EnumFirst (type uint8) as type int in assignment
-
-
-Losing method sets
-
-A more subtle issue occurs with types that have methods and optional
-interfaces. Consider the following:
-
- package main
-
- import "fmt"
-
- type Enum int
-
- func (e Enum) String() string {
- return "an enum"
- }
-
- const (
- EnumFirst Enum = 1
- EnumSecond = 2
- )
-
- func main() {
- fmt.Println(EnumFirst)
- fmt.Println(EnumSecond)
- }
-
-This code will output
-
- an enum
- 2
-
-as EnumSecond has no explicit type, and thus defaults to int.`,
- Since: "2019.1",
- },
-
- "SA9005": {
- Title: `Trying to marshal a struct with no public fields nor custom marshaling`,
- Text: `The encoding/json and encoding/xml packages only operate on exported
-fields in structs, not unexported ones. It is usually an error to try
-to (un)marshal structs that only consist of unexported fields.
-
-This check will not flag calls involving types that define custom
-marshaling behavior, e.g. via MarshalJSON methods. It will also not
-flag empty structs.`,
- Since: "2019.2",
- },
-}
diff --git a/vendor/honnef.co/go/tools/staticcheck/lint.go b/vendor/honnef.co/go/tools/staticcheck/lint.go
deleted file mode 100644
index 16b6dbd8488..00000000000
--- a/vendor/honnef.co/go/tools/staticcheck/lint.go
+++ /dev/null
@@ -1,3825 +0,0 @@
-// Package staticcheck contains a linter for Go source code.
-package staticcheck // import "honnef.co/go/tools/staticcheck"
-
-import (
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
- htmltemplate "html/template"
- "net/http"
- "reflect"
- "regexp"
- "regexp/syntax"
- "sort"
- "strconv"
- "strings"
- texttemplate "text/template"
- "unicode"
-
- . "honnef.co/go/tools/arg"
- "honnef.co/go/tools/code"
- "honnef.co/go/tools/deprecated"
- "honnef.co/go/tools/edit"
- "honnef.co/go/tools/facts"
- "honnef.co/go/tools/functions"
- "honnef.co/go/tools/internal/passes/buildir"
- "honnef.co/go/tools/internal/sharedcheck"
- "honnef.co/go/tools/ir"
- "honnef.co/go/tools/ir/irutil"
- "honnef.co/go/tools/lint"
- . "honnef.co/go/tools/lint/lintdsl"
- "honnef.co/go/tools/pattern"
- "honnef.co/go/tools/printf"
- "honnef.co/go/tools/report"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/astutil"
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/go/types/typeutil"
-)
-
-func checkSortSlice(call *Call) {
- c := call.Instr.Common().StaticCallee()
- arg := call.Args[0]
-
- T := arg.Value.Value.Type().Underlying()
- switch T.(type) {
- case *types.Interface:
- // we don't know.
- // TODO(dh): if the value is a phi node we can look at its edges
- if k, ok := arg.Value.Value.(*ir.Const); ok && k.Value == nil {
- // literal nil, e.g. sort.Sort(nil, ...)
- arg.Invalid(fmt.Sprintf("cannot call %s on nil literal", c))
- }
- case *types.Slice:
- // this is fine
- default:
- // this is not fine
- arg.Invalid(fmt.Sprintf("%s must only be called on slices, was called on %s", c, T))
- }
-}
-
-func validRegexp(call *Call) {
- arg := call.Args[0]
- err := ValidateRegexp(arg.Value)
- if err != nil {
- arg.Invalid(err.Error())
- }
-}
-
-type runeSlice []rune
-
-func (rs runeSlice) Len() int { return len(rs) }
-func (rs runeSlice) Less(i int, j int) bool { return rs[i] < rs[j] }
-func (rs runeSlice) Swap(i int, j int) { rs[i], rs[j] = rs[j], rs[i] }
-
-func utf8Cutset(call *Call) {
- arg := call.Args[1]
- if InvalidUTF8(arg.Value) {
- arg.Invalid(MsgInvalidUTF8)
- }
-}
-
-func uniqueCutset(call *Call) {
- arg := call.Args[1]
- if !UniqueStringCutset(arg.Value) {
- arg.Invalid(MsgNonUniqueCutset)
- }
-}
-
-func unmarshalPointer(name string, arg int) CallCheck {
- return func(call *Call) {
- if !Pointer(call.Args[arg].Value) {
- call.Args[arg].Invalid(fmt.Sprintf("%s expects to unmarshal into a pointer, but the provided value is not a pointer", name))
- }
- }
-}
-
-func pointlessIntMath(call *Call) {
- if ConvertedFromInt(call.Args[0].Value) {
- call.Invalid(fmt.Sprintf("calling %s on a converted integer is pointless", code.CallName(call.Instr.Common())))
- }
-}
-
-func checkValidHostPort(arg int) CallCheck {
- return func(call *Call) {
- if !ValidHostPort(call.Args[arg].Value) {
- call.Args[arg].Invalid(MsgInvalidHostPort)
- }
- }
-}
-
-var (
- checkRegexpRules = map[string]CallCheck{
- "regexp.MustCompile": validRegexp,
- "regexp.Compile": validRegexp,
- "regexp.Match": validRegexp,
- "regexp.MatchReader": validRegexp,
- "regexp.MatchString": validRegexp,
- }
-
- checkTimeParseRules = map[string]CallCheck{
- "time.Parse": func(call *Call) {
- arg := call.Args[Arg("time.Parse.layout")]
- err := ValidateTimeLayout(arg.Value)
- if err != nil {
- arg.Invalid(err.Error())
- }
- },
- }
-
- checkEncodingBinaryRules = map[string]CallCheck{
- "encoding/binary.Write": func(call *Call) {
- arg := call.Args[Arg("encoding/binary.Write.data")]
- if !CanBinaryMarshal(call.Pass, arg.Value) {
- arg.Invalid(fmt.Sprintf("value of type %s cannot be used with binary.Write", arg.Value.Value.Type()))
- }
- },
- }
-
- checkURLsRules = map[string]CallCheck{
- "net/url.Parse": func(call *Call) {
- arg := call.Args[Arg("net/url.Parse.rawurl")]
- err := ValidateURL(arg.Value)
- if err != nil {
- arg.Invalid(err.Error())
- }
- },
- }
-
- checkSyncPoolValueRules = map[string]CallCheck{
- "(*sync.Pool).Put": func(call *Call) {
- arg := call.Args[Arg("(*sync.Pool).Put.x")]
- typ := arg.Value.Value.Type()
- if !code.IsPointerLike(typ) {
- arg.Invalid("argument should be pointer-like to avoid allocations")
- }
- },
- }
-
- checkRegexpFindAllRules = map[string]CallCheck{
- "(*regexp.Regexp).FindAll": RepeatZeroTimes("a FindAll method", 1),
- "(*regexp.Regexp).FindAllIndex": RepeatZeroTimes("a FindAll method", 1),
- "(*regexp.Regexp).FindAllString": RepeatZeroTimes("a FindAll method", 1),
- "(*regexp.Regexp).FindAllStringIndex": RepeatZeroTimes("a FindAll method", 1),
- "(*regexp.Regexp).FindAllStringSubmatch": RepeatZeroTimes("a FindAll method", 1),
- "(*regexp.Regexp).FindAllStringSubmatchIndex": RepeatZeroTimes("a FindAll method", 1),
- "(*regexp.Regexp).FindAllSubmatch": RepeatZeroTimes("a FindAll method", 1),
- "(*regexp.Regexp).FindAllSubmatchIndex": RepeatZeroTimes("a FindAll method", 1),
- }
-
- checkUTF8CutsetRules = map[string]CallCheck{
- "strings.IndexAny": utf8Cutset,
- "strings.LastIndexAny": utf8Cutset,
- "strings.ContainsAny": utf8Cutset,
- "strings.Trim": utf8Cutset,
- "strings.TrimLeft": utf8Cutset,
- "strings.TrimRight": utf8Cutset,
- }
-
- checkUniqueCutsetRules = map[string]CallCheck{
- "strings.Trim": uniqueCutset,
- "strings.TrimLeft": uniqueCutset,
- "strings.TrimRight": uniqueCutset,
- }
-
- checkUnmarshalPointerRules = map[string]CallCheck{
- "encoding/xml.Unmarshal": unmarshalPointer("xml.Unmarshal", 1),
- "(*encoding/xml.Decoder).Decode": unmarshalPointer("Decode", 0),
- "(*encoding/xml.Decoder).DecodeElement": unmarshalPointer("DecodeElement", 0),
- "encoding/json.Unmarshal": unmarshalPointer("json.Unmarshal", 1),
- "(*encoding/json.Decoder).Decode": unmarshalPointer("Decode", 0),
- }
-
- checkUnbufferedSignalChanRules = map[string]CallCheck{
- "os/signal.Notify": func(call *Call) {
- arg := call.Args[Arg("os/signal.Notify.c")]
- if UnbufferedChannel(arg.Value) {
- arg.Invalid("the channel used with signal.Notify should be buffered")
- }
- },
- }
-
- checkMathIntRules = map[string]CallCheck{
- "math.Ceil": pointlessIntMath,
- "math.Floor": pointlessIntMath,
- "math.IsNaN": pointlessIntMath,
- "math.Trunc": pointlessIntMath,
- "math.IsInf": pointlessIntMath,
- }
-
- checkStringsReplaceZeroRules = map[string]CallCheck{
- "strings.Replace": RepeatZeroTimes("strings.Replace", 3),
- "bytes.Replace": RepeatZeroTimes("bytes.Replace", 3),
- }
-
- checkListenAddressRules = map[string]CallCheck{
- "net/http.ListenAndServe": checkValidHostPort(0),
- "net/http.ListenAndServeTLS": checkValidHostPort(0),
- }
-
- checkBytesEqualIPRules = map[string]CallCheck{
- "bytes.Equal": func(call *Call) {
- if ConvertedFrom(call.Args[Arg("bytes.Equal.a")].Value, "net.IP") &&
- ConvertedFrom(call.Args[Arg("bytes.Equal.b")].Value, "net.IP") {
- call.Invalid("use net.IP.Equal to compare net.IPs, not bytes.Equal")
- }
- },
- }
-
- checkRegexpMatchLoopRules = map[string]CallCheck{
- "regexp.Match": loopedRegexp("regexp.Match"),
- "regexp.MatchReader": loopedRegexp("regexp.MatchReader"),
- "regexp.MatchString": loopedRegexp("regexp.MatchString"),
- }
-
- checkNoopMarshal = map[string]CallCheck{
- // TODO(dh): should we really flag XML? Even an empty struct
- // produces a non-zero amount of data, namely its type name.
- // Let's see if we encounter any false positives.
- //
- // Also, should we flag gob?
- "encoding/json.Marshal": checkNoopMarshalImpl(Arg("json.Marshal.v"), "MarshalJSON", "MarshalText"),
- "encoding/xml.Marshal": checkNoopMarshalImpl(Arg("xml.Marshal.v"), "MarshalXML", "MarshalText"),
- "(*encoding/json.Encoder).Encode": checkNoopMarshalImpl(Arg("(*encoding/json.Encoder).Encode.v"), "MarshalJSON", "MarshalText"),
- "(*encoding/xml.Encoder).Encode": checkNoopMarshalImpl(Arg("(*encoding/xml.Encoder).Encode.v"), "MarshalXML", "MarshalText"),
-
- "encoding/json.Unmarshal": checkNoopMarshalImpl(Arg("json.Unmarshal.v"), "UnmarshalJSON", "UnmarshalText"),
- "encoding/xml.Unmarshal": checkNoopMarshalImpl(Arg("xml.Unmarshal.v"), "UnmarshalXML", "UnmarshalText"),
- "(*encoding/json.Decoder).Decode": checkNoopMarshalImpl(Arg("(*encoding/json.Decoder).Decode.v"), "UnmarshalJSON", "UnmarshalText"),
- "(*encoding/xml.Decoder).Decode": checkNoopMarshalImpl(Arg("(*encoding/xml.Decoder).Decode.v"), "UnmarshalXML", "UnmarshalText"),
- }
-
- checkUnsupportedMarshal = map[string]CallCheck{
- "encoding/json.Marshal": checkUnsupportedMarshalImpl(Arg("json.Marshal.v"), "json", "MarshalJSON", "MarshalText"),
- "encoding/xml.Marshal": checkUnsupportedMarshalImpl(Arg("xml.Marshal.v"), "xml", "MarshalXML", "MarshalText"),
- "(*encoding/json.Encoder).Encode": checkUnsupportedMarshalImpl(Arg("(*encoding/json.Encoder).Encode.v"), "json", "MarshalJSON", "MarshalText"),
- "(*encoding/xml.Encoder).Encode": checkUnsupportedMarshalImpl(Arg("(*encoding/xml.Encoder).Encode.v"), "xml", "MarshalXML", "MarshalText"),
- }
-
- checkAtomicAlignment = map[string]CallCheck{
- "sync/atomic.AddInt64": checkAtomicAlignmentImpl,
- "sync/atomic.AddUint64": checkAtomicAlignmentImpl,
- "sync/atomic.CompareAndSwapInt64": checkAtomicAlignmentImpl,
- "sync/atomic.CompareAndSwapUint64": checkAtomicAlignmentImpl,
- "sync/atomic.LoadInt64": checkAtomicAlignmentImpl,
- "sync/atomic.LoadUint64": checkAtomicAlignmentImpl,
- "sync/atomic.StoreInt64": checkAtomicAlignmentImpl,
- "sync/atomic.StoreUint64": checkAtomicAlignmentImpl,
- "sync/atomic.SwapInt64": checkAtomicAlignmentImpl,
- "sync/atomic.SwapUint64": checkAtomicAlignmentImpl,
- }
-
- // TODO(dh): detect printf wrappers
- checkPrintfRules = map[string]CallCheck{
- "fmt.Errorf": func(call *Call) { checkPrintfCall(call, 0, 1) },
- "fmt.Printf": func(call *Call) { checkPrintfCall(call, 0, 1) },
- "fmt.Sprintf": func(call *Call) { checkPrintfCall(call, 0, 1) },
- "fmt.Fprintf": func(call *Call) { checkPrintfCall(call, 1, 2) },
- "golang.org/x/xerrors.Errorf": func(call *Call) { checkPrintfCall(call, 0, 1) },
- }
-
- checkSortSliceRules = map[string]CallCheck{
- "sort.Slice": checkSortSlice,
- "sort.SliceIsSorted": checkSortSlice,
- "sort.SliceStable": checkSortSlice,
- }
-
- checkWithValueKeyRules = map[string]CallCheck{
- "context.WithValue": checkWithValueKey,
- }
-)
-
-func checkPrintfCall(call *Call, fIdx, vIdx int) {
- f := call.Args[fIdx]
- var args []ir.Value
- switch v := call.Args[vIdx].Value.Value.(type) {
- case *ir.Slice:
- var ok bool
- args, ok = irutil.Vararg(v)
- if !ok {
- // We don't know what the actual arguments to the function are
- return
- }
- case *ir.Const:
- // nil, i.e. no arguments
- default:
- // We don't know what the actual arguments to the function are
- return
- }
- checkPrintfCallImpl(f, f.Value.Value, args)
-}
-
-type verbFlag int
-
-const (
- isInt verbFlag = 1 << iota
- isBool
- isFP
- isString
- isPointer
- // Verbs that accept "pseudo pointers" will sometimes dereference
- // non-nil pointers. For example, %x on a non-nil *struct will print the
- // individual fields, but on a nil pointer it will print the address.
- isPseudoPointer
- isSlice
- isAny
- noRecurse
-)
-
-var verbs = [...]verbFlag{
- 'b': isPseudoPointer | isInt | isFP,
- 'c': isInt,
- 'd': isPseudoPointer | isInt,
- 'e': isFP,
- 'E': isFP,
- 'f': isFP,
- 'F': isFP,
- 'g': isFP,
- 'G': isFP,
- 'o': isPseudoPointer | isInt,
- 'O': isPseudoPointer | isInt,
- 'p': isSlice | isPointer | noRecurse,
- 'q': isInt | isString,
- 's': isString,
- 't': isBool,
- 'T': isAny,
- 'U': isInt,
- 'v': isAny,
- 'X': isPseudoPointer | isInt | isFP | isString,
- 'x': isPseudoPointer | isInt | isFP | isString,
-}
-
-func checkPrintfCallImpl(carg *Argument, f ir.Value, args []ir.Value) {
- var msCache *typeutil.MethodSetCache
- if f.Parent() != nil {
- msCache = &f.Parent().Prog.MethodSets
- }
-
- elem := func(T types.Type, verb rune) ([]types.Type, bool) {
- if verbs[verb]&noRecurse != 0 {
- return []types.Type{T}, false
- }
- switch T := T.(type) {
- case *types.Slice:
- if verbs[verb]&isSlice != 0 {
- return []types.Type{T}, false
- }
- if verbs[verb]&isString != 0 && code.IsType(T.Elem().Underlying(), "byte") {
- return []types.Type{T}, false
- }
- return []types.Type{T.Elem()}, true
- case *types.Map:
- key := T.Key()
- val := T.Elem()
- return []types.Type{key, val}, true
- case *types.Struct:
- out := make([]types.Type, 0, T.NumFields())
- for i := 0; i < T.NumFields(); i++ {
- out = append(out, T.Field(i).Type())
- }
- return out, true
- case *types.Array:
- return []types.Type{T.Elem()}, true
- default:
- return []types.Type{T}, false
- }
- }
- isInfo := func(T types.Type, info types.BasicInfo) bool {
- basic, ok := T.Underlying().(*types.Basic)
- return ok && basic.Info()&info != 0
- }
-
- isStringer := func(T types.Type, ms *types.MethodSet) bool {
- sel := ms.Lookup(nil, "String")
- if sel == nil {
- return false
- }
- fn, ok := sel.Obj().(*types.Func)
- if !ok {
- // should be unreachable
- return false
- }
- sig := fn.Type().(*types.Signature)
- if sig.Params().Len() != 0 {
- return false
- }
- if sig.Results().Len() != 1 {
- return false
- }
- if !code.IsType(sig.Results().At(0).Type(), "string") {
- return false
- }
- return true
- }
- isError := func(T types.Type, ms *types.MethodSet) bool {
- sel := ms.Lookup(nil, "Error")
- if sel == nil {
- return false
- }
- fn, ok := sel.Obj().(*types.Func)
- if !ok {
- // should be unreachable
- return false
- }
- sig := fn.Type().(*types.Signature)
- if sig.Params().Len() != 0 {
- return false
- }
- if sig.Results().Len() != 1 {
- return false
- }
- if !code.IsType(sig.Results().At(0).Type(), "string") {
- return false
- }
- return true
- }
-
- isFormatter := func(T types.Type, ms *types.MethodSet) bool {
- sel := ms.Lookup(nil, "Format")
- if sel == nil {
- return false
- }
- fn, ok := sel.Obj().(*types.Func)
- if !ok {
- // should be unreachable
- return false
- }
- sig := fn.Type().(*types.Signature)
- if sig.Params().Len() != 2 {
- return false
- }
- // TODO(dh): check the types of the arguments for more
- // precision
- if sig.Results().Len() != 0 {
- return false
- }
- return true
- }
-
- seen := map[types.Type]bool{}
- var checkType func(verb rune, T types.Type, top bool) bool
- checkType = func(verb rune, T types.Type, top bool) bool {
- if top {
- for k := range seen {
- delete(seen, k)
- }
- }
- if seen[T] {
- return true
- }
- seen[T] = true
- if int(verb) >= len(verbs) {
- // Unknown verb
- return true
- }
-
- flags := verbs[verb]
- if flags == 0 {
- // Unknown verb
- return true
- }
-
- ms := msCache.MethodSet(T)
- if isFormatter(T, ms) {
- // the value is responsible for formatting itself
- return true
- }
-
- if flags&isString != 0 && (isStringer(T, ms) || isError(T, ms)) {
- // Check for stringer early because we're about to dereference
- return true
- }
-
- T = T.Underlying()
- if flags&(isPointer|isPseudoPointer) == 0 && top {
- T = code.Dereference(T)
- }
- if flags&isPseudoPointer != 0 && top {
- t := code.Dereference(T)
- if _, ok := t.Underlying().(*types.Struct); ok {
- T = t
- }
- }
-
- if _, ok := T.(*types.Interface); ok {
- // We don't know what's in the interface
- return true
- }
-
- var info types.BasicInfo
- if flags&isInt != 0 {
- info |= types.IsInteger
- }
- if flags&isBool != 0 {
- info |= types.IsBoolean
- }
- if flags&isFP != 0 {
- info |= types.IsFloat | types.IsComplex
- }
- if flags&isString != 0 {
- info |= types.IsString
- }
-
- if info != 0 && isInfo(T, info) {
- return true
- }
-
- if flags&isString != 0 && (code.IsType(T, "[]byte") || isStringer(T, ms) || isError(T, ms)) {
- return true
- }
-
- if flags&isPointer != 0 && code.IsPointerLike(T) {
- return true
- }
- if flags&isPseudoPointer != 0 {
- switch U := T.Underlying().(type) {
- case *types.Pointer:
- if !top {
- return true
- }
-
- if _, ok := U.Elem().Underlying().(*types.Struct); !ok {
- // TODO(dh): can this condition ever be false? For
- // *T, if T is a struct, we'll already have
- // dereferenced it, meaning the *types.Pointer
- // branch couldn't have been taken. For T that
- // aren't structs, this condition will always
- // evaluate to true.
- return true
- }
- case *types.Chan, *types.Signature:
- // Channels and functions are always treated as
- // pointers and never recursed into.
- return true
- case *types.Basic:
- if U.Kind() == types.UnsafePointer {
- return true
- }
- case *types.Interface:
- // we will already have bailed if the type is an
- // interface.
- panic("unreachable")
- default:
- // other pointer-like types, such as maps or slices,
- // will be printed element-wise.
- }
- }
-
- if flags&isSlice != 0 {
- if _, ok := T.(*types.Slice); ok {
- return true
- }
- }
-
- if flags&isAny != 0 {
- return true
- }
-
- elems, ok := elem(T.Underlying(), verb)
- if !ok {
- return false
- }
- for _, elem := range elems {
- if !checkType(verb, elem, false) {
- return false
- }
- }
-
- return true
- }
-
- k, ok := f.(*ir.Const)
- if !ok {
- return
- }
- actions, err := printf.Parse(constant.StringVal(k.Value))
- if err != nil {
- carg.Invalid("couldn't parse format string")
- return
- }
-
- ptr := 1
- hasExplicit := false
-
- checkStar := func(verb printf.Verb, star printf.Argument) bool {
- if star, ok := star.(printf.Star); ok {
- idx := 0
- if star.Index == -1 {
- idx = ptr
- ptr++
- } else {
- hasExplicit = true
- idx = star.Index
- ptr = star.Index + 1
- }
- if idx == 0 {
- carg.Invalid(fmt.Sprintf("Printf format %s reads invalid arg 0; indices are 1-based", verb.Raw))
- return false
- }
- if idx > len(args) {
- carg.Invalid(
- fmt.Sprintf("Printf format %s reads arg #%d, but call has only %d args",
- verb.Raw, idx, len(args)))
- return false
- }
- if arg, ok := args[idx-1].(*ir.MakeInterface); ok {
- if !isInfo(arg.X.Type(), types.IsInteger) {
- carg.Invalid(fmt.Sprintf("Printf format %s reads non-int arg #%d as argument of *", verb.Raw, idx))
- }
- }
- }
- return true
- }
-
- // We only report one problem per format string. Making a
- // mistake with an index tends to invalidate all future
- // implicit indices.
- for _, action := range actions {
- verb, ok := action.(printf.Verb)
- if !ok {
- continue
- }
-
- if !checkStar(verb, verb.Width) || !checkStar(verb, verb.Precision) {
- return
- }
-
- off := ptr
- if verb.Value != -1 {
- hasExplicit = true
- off = verb.Value
- }
- if off > len(args) {
- carg.Invalid(
- fmt.Sprintf("Printf format %s reads arg #%d, but call has only %d args",
- verb.Raw, off, len(args)))
- return
- } else if verb.Value == 0 && verb.Letter != '%' {
- carg.Invalid(fmt.Sprintf("Printf format %s reads invalid arg 0; indices are 1-based", verb.Raw))
- return
- } else if off != 0 {
- arg, ok := args[off-1].(*ir.MakeInterface)
- if ok {
- if !checkType(verb.Letter, arg.X.Type(), true) {
- carg.Invalid(fmt.Sprintf("Printf format %s has arg #%d of wrong type %s",
- verb.Raw, ptr, args[ptr-1].(*ir.MakeInterface).X.Type()))
- return
- }
- }
- }
-
- switch verb.Value {
- case -1:
- // Consume next argument
- ptr++
- case 0:
- // Don't consume any arguments
- default:
- ptr = verb.Value + 1
- }
- }
-
- if !hasExplicit && ptr <= len(args) {
- carg.Invalid(fmt.Sprintf("Printf call needs %d args but has %d args", ptr-1, len(args)))
- }
-}
-
-func checkAtomicAlignmentImpl(call *Call) {
- sizes := call.Pass.TypesSizes
- if sizes.Sizeof(types.Typ[types.Uintptr]) != 4 {
- // Not running on a 32-bit platform
- return
- }
- v, ok := call.Args[0].Value.Value.(*ir.FieldAddr)
- if !ok {
- // TODO(dh): also check indexing into arrays and slices
- return
- }
- T := v.X.Type().Underlying().(*types.Pointer).Elem().Underlying().(*types.Struct)
- fields := make([]*types.Var, 0, T.NumFields())
- for i := 0; i < T.NumFields() && i <= v.Field; i++ {
- fields = append(fields, T.Field(i))
- }
-
- off := sizes.Offsetsof(fields)[v.Field]
- if off%8 != 0 {
- msg := fmt.Sprintf("address of non 64-bit aligned field %s passed to %s",
- T.Field(v.Field).Name(),
- code.CallName(call.Instr.Common()))
- call.Invalid(msg)
- }
-}
-
-func checkNoopMarshalImpl(argN int, meths ...string) CallCheck {
- return func(call *Call) {
- if code.IsGenerated(call.Pass, call.Instr.Pos()) {
- return
- }
- arg := call.Args[argN]
- T := arg.Value.Value.Type()
- Ts, ok := code.Dereference(T).Underlying().(*types.Struct)
- if !ok {
- return
- }
- if Ts.NumFields() == 0 {
- return
- }
- fields := code.FlattenFields(Ts)
- for _, field := range fields {
- if field.Var.Exported() {
- return
- }
- }
- // OPT(dh): we could use a method set cache here
- ms := call.Instr.Parent().Prog.MethodSets.MethodSet(T)
- // TODO(dh): we're not checking the signature, which can cause false negatives.
- // This isn't a huge problem, however, since vet complains about incorrect signatures.
- for _, meth := range meths {
- if ms.Lookup(nil, meth) != nil {
- return
- }
- }
- arg.Invalid("struct doesn't have any exported fields, nor custom marshaling")
- }
-}
-
-func checkUnsupportedMarshalImpl(argN int, tag string, meths ...string) CallCheck {
- // TODO(dh): flag slices and maps of unsupported types
- return func(call *Call) {
- msCache := &call.Instr.Parent().Prog.MethodSets
-
- arg := call.Args[argN]
- T := arg.Value.Value.Type()
- Ts, ok := code.Dereference(T).Underlying().(*types.Struct)
- if !ok {
- return
- }
- ms := msCache.MethodSet(T)
- // TODO(dh): we're not checking the signature, which can cause false negatives.
- // This isn't a huge problem, however, since vet complains about incorrect signatures.
- for _, meth := range meths {
- if ms.Lookup(nil, meth) != nil {
- return
- }
- }
- fields := code.FlattenFields(Ts)
- for _, field := range fields {
- if !(field.Var.Exported()) {
- continue
- }
- if reflect.StructTag(field.Tag).Get(tag) == "-" {
- continue
- }
- ms := msCache.MethodSet(field.Var.Type())
- // TODO(dh): we're not checking the signature, which can cause false negatives.
- // This isn't a huge problem, however, since vet complains about incorrect signatures.
- for _, meth := range meths {
- if ms.Lookup(nil, meth) != nil {
- return
- }
- }
- switch field.Var.Type().Underlying().(type) {
- case *types.Chan, *types.Signature:
- arg.Invalid(fmt.Sprintf("trying to marshal chan or func value, field %s", fieldPath(T, field.Path)))
- }
- }
- }
-}
-
-func fieldPath(start types.Type, indices []int) string {
- p := start.String()
- for _, idx := range indices {
- field := code.Dereference(start).Underlying().(*types.Struct).Field(idx)
- start = field.Type()
- p += "." + field.Name()
- }
- return p
-}
-
-func isInLoop(b *ir.BasicBlock) bool {
- sets := functions.FindLoops(b.Parent())
- for _, set := range sets {
- if set.Has(b) {
- return true
- }
- }
- return false
-}
-
-func CheckUntrappableSignal(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- call := node.(*ast.CallExpr)
- if !code.IsCallToAnyAST(pass, call,
- "os/signal.Ignore", "os/signal.Notify", "os/signal.Reset") {
- return
- }
-
- hasSigterm := false
- for _, arg := range call.Args {
- if conv, ok := arg.(*ast.CallExpr); ok && isName(pass, conv.Fun, "os.Signal") {
- arg = conv.Args[0]
- }
-
- if isName(pass, arg, "syscall.SIGTERM") {
- hasSigterm = true
- break
- }
-
- }
- for i, arg := range call.Args {
- if conv, ok := arg.(*ast.CallExpr); ok && isName(pass, conv.Fun, "os.Signal") {
- arg = conv.Args[0]
- }
-
- if isName(pass, arg, "os.Kill") || isName(pass, arg, "syscall.SIGKILL") {
- var fixes []analysis.SuggestedFix
- if !hasSigterm {
- nargs := make([]ast.Expr, len(call.Args))
- for j, a := range call.Args {
- if i == j {
- nargs[j] = Selector("syscall", "SIGTERM")
- } else {
- nargs[j] = a
- }
- }
- ncall := *call
- ncall.Args = nargs
- fixes = append(fixes, edit.Fix(fmt.Sprintf("use syscall.SIGTERM instead of %s", report.Render(pass, arg)), edit.ReplaceWithNode(pass.Fset, call, &ncall)))
- }
- nargs := make([]ast.Expr, 0, len(call.Args))
- for j, a := range call.Args {
- if i == j {
- continue
- }
- nargs = append(nargs, a)
- }
- ncall := *call
- ncall.Args = nargs
- fixes = append(fixes, edit.Fix(fmt.Sprintf("remove %s from list of arguments", report.Render(pass, arg)), edit.ReplaceWithNode(pass.Fset, call, &ncall)))
- report.Report(pass, arg, fmt.Sprintf("%s cannot be trapped (did you mean syscall.SIGTERM?)", report.Render(pass, arg)), report.Fixes(fixes...))
- }
- if isName(pass, arg, "syscall.SIGSTOP") {
- nargs := make([]ast.Expr, 0, len(call.Args)-1)
- for j, a := range call.Args {
- if i == j {
- continue
- }
- nargs = append(nargs, a)
- }
- ncall := *call
- ncall.Args = nargs
- report.Report(pass, arg, "syscall.SIGSTOP cannot be trapped", report.Fixes(edit.Fix("remove syscall.SIGSTOP from list of arguments", edit.ReplaceWithNode(pass.Fset, call, &ncall))))
- }
- }
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-func CheckTemplate(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- call := node.(*ast.CallExpr)
- var kind string
- switch code.CallNameAST(pass, call) {
- case "(*text/template.Template).Parse":
- kind = "text"
- case "(*html/template.Template).Parse":
- kind = "html"
- default:
- return
- }
- sel := call.Fun.(*ast.SelectorExpr)
- if !code.IsCallToAnyAST(pass, sel.X, "text/template.New", "html/template.New") {
- // TODO(dh): this is a cheap workaround for templates with
- // different delims. A better solution with less false
- // negatives would use data flow analysis to see where the
- // template comes from and where it has been
- return
- }
- s, ok := code.ExprToString(pass, call.Args[Arg("(*text/template.Template).Parse.text")])
- if !ok {
- return
- }
- var err error
- switch kind {
- case "text":
- _, err = texttemplate.New("").Parse(s)
- case "html":
- _, err = htmltemplate.New("").Parse(s)
- }
- if err != nil {
- // TODO(dominikh): whitelist other parse errors, if any
- if strings.Contains(err.Error(), "unexpected") {
- report.Report(pass, call.Args[Arg("(*text/template.Template).Parse.text")], err.Error())
- }
- }
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var (
- checkTimeSleepConstantPatternRns = pattern.MustParse(`(BinaryExpr duration "*" (SelectorExpr (Ident "time") (Ident "Nanosecond")))`)
- checkTimeSleepConstantPatternRs = pattern.MustParse(`(BinaryExpr duration "*" (SelectorExpr (Ident "time") (Ident "Second")))`)
-)
-
-func CheckTimeSleepConstant(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- call := node.(*ast.CallExpr)
- if !code.IsCallToAST(pass, call, "time.Sleep") {
- return
- }
- lit, ok := call.Args[Arg("time.Sleep.d")].(*ast.BasicLit)
- if !ok {
- return
- }
- n, err := strconv.Atoi(lit.Value)
- if err != nil {
- return
- }
- if n == 0 || n > 120 {
- // time.Sleep(0) is a seldom used pattern in concurrency
- // tests. >120 might be intentional. 120 was chosen
- // because the user could've meant 2 minutes.
- return
- }
-
- report.Report(pass, lit,
- fmt.Sprintf("sleeping for %d nanoseconds is probably a bug; be explicit if it isn't", n), report.Fixes(
- edit.Fix("explicitly use nanoseconds", edit.ReplaceWithPattern(pass, checkTimeSleepConstantPatternRns, pattern.State{"duration": lit}, lit)),
- edit.Fix("use seconds", edit.ReplaceWithPattern(pass, checkTimeSleepConstantPatternRs, pattern.State{"duration": lit}, lit))))
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var checkWaitgroupAddQ = pattern.MustParse(`
- (GoStmt
- (CallExpr
- (FuncLit
- _
- call@(CallExpr (Function "(*sync.WaitGroup).Add") _):_) _))`)
-
-func CheckWaitgroupAdd(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if m, ok := Match(pass, checkWaitgroupAddQ, node); ok {
- call := m.State["call"].(ast.Node)
- report.Report(pass, call, fmt.Sprintf("should call %s before starting the goroutine to avoid a race", report.Render(pass, call)))
- }
- }
- code.Preorder(pass, fn, (*ast.GoStmt)(nil))
- return nil, nil
-}
-
-func CheckInfiniteEmptyLoop(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- loop := node.(*ast.ForStmt)
- if len(loop.Body.List) != 0 || loop.Post != nil {
- return
- }
-
- if loop.Init != nil {
- // TODO(dh): this isn't strictly necessary, it just makes
- // the check easier.
- return
- }
- // An empty loop is bad news in two cases: 1) The loop has no
- // condition. In that case, it's just a loop that spins
- // forever and as fast as it can, keeping a core busy. 2) The
- // loop condition only consists of variable or field reads and
- // operators on those. The only way those could change their
- // value is with unsynchronised access, which constitutes a
- // data race.
- //
- // If the condition contains any function calls, its behaviour
- // is dynamic and the loop might terminate. Similarly for
- // channel receives.
-
- if loop.Cond != nil {
- if code.MayHaveSideEffects(pass, loop.Cond, nil) {
- return
- }
- if ident, ok := loop.Cond.(*ast.Ident); ok {
- if k, ok := pass.TypesInfo.ObjectOf(ident).(*types.Const); ok {
- if !constant.BoolVal(k.Val()) {
- // don't flag `for false {}` loops. They're a debug aid.
- return
- }
- }
- }
- report.Report(pass, loop, "loop condition never changes or has a race condition")
- }
- report.Report(pass, loop, "this loop will spin, using 100%% CPU", report.ShortRange())
- }
- code.Preorder(pass, fn, (*ast.ForStmt)(nil))
- return nil, nil
-}
-
-func CheckDeferInInfiniteLoop(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- mightExit := false
- var defers []ast.Stmt
- loop := node.(*ast.ForStmt)
- if loop.Cond != nil {
- return
- }
- fn2 := func(node ast.Node) bool {
- switch stmt := node.(type) {
- case *ast.ReturnStmt:
- mightExit = true
- return false
- case *ast.BranchStmt:
- // TODO(dominikh): if this sees a break in a switch or
- // select, it doesn't check if it breaks the loop or
- // just the select/switch. This causes some false
- // negatives.
- if stmt.Tok == token.BREAK {
- mightExit = true
- return false
- }
- case *ast.DeferStmt:
- defers = append(defers, stmt)
- case *ast.FuncLit:
- // Don't look into function bodies
- return false
- }
- return true
- }
- ast.Inspect(loop.Body, fn2)
- if mightExit {
- return
- }
- for _, stmt := range defers {
- report.Report(pass, stmt, "defers in this infinite loop will never run")
- }
- }
- code.Preorder(pass, fn, (*ast.ForStmt)(nil))
- return nil, nil
-}
-
-func CheckDubiousDeferInChannelRangeLoop(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- loop := node.(*ast.RangeStmt)
- typ := pass.TypesInfo.TypeOf(loop.X)
- _, ok := typ.Underlying().(*types.Chan)
- if !ok {
- return
- }
- fn2 := func(node ast.Node) bool {
- switch stmt := node.(type) {
- case *ast.DeferStmt:
- report.Report(pass, stmt, "defers in this range loop won't run unless the channel gets closed")
- case *ast.FuncLit:
- // Don't look into function bodies
- return false
- }
- return true
- }
- ast.Inspect(loop.Body, fn2)
- }
- code.Preorder(pass, fn, (*ast.RangeStmt)(nil))
- return nil, nil
-}
-
-func CheckTestMainExit(pass *analysis.Pass) (interface{}, error) {
- var (
- fnmain ast.Node
- callsExit bool
- callsRun bool
- arg types.Object
- )
- fn := func(node ast.Node, push bool) bool {
- if !push {
- if fnmain != nil && node == fnmain {
- if !callsExit && callsRun {
- report.Report(pass, fnmain, "TestMain should call os.Exit to set exit code")
- }
- fnmain = nil
- callsExit = false
- callsRun = false
- arg = nil
- }
- return true
- }
-
- switch node := node.(type) {
- case *ast.FuncDecl:
- if fnmain != nil {
- return true
- }
- if !isTestMain(pass, node) {
- return false
- }
- fnmain = node
- arg = pass.TypesInfo.ObjectOf(node.Type.Params.List[0].Names[0])
- return true
- case *ast.CallExpr:
- if code.IsCallToAST(pass, node, "os.Exit") {
- callsExit = true
- return false
- }
- sel, ok := node.Fun.(*ast.SelectorExpr)
- if !ok {
- return true
- }
- ident, ok := sel.X.(*ast.Ident)
- if !ok {
- return true
- }
- if arg != pass.TypesInfo.ObjectOf(ident) {
- return true
- }
- if sel.Sel.Name == "Run" {
- callsRun = true
- return false
- }
- return true
- default:
- ExhaustiveTypeSwitch(node)
- return true
- }
- }
- pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Nodes([]ast.Node{(*ast.FuncDecl)(nil), (*ast.CallExpr)(nil)}, fn)
- return nil, nil
-}
-
-func isTestMain(pass *analysis.Pass, decl *ast.FuncDecl) bool {
- if decl.Name.Name != "TestMain" {
- return false
- }
- if len(decl.Type.Params.List) != 1 {
- return false
- }
- arg := decl.Type.Params.List[0]
- if len(arg.Names) != 1 {
- return false
- }
- return code.IsOfType(pass, arg.Type, "*testing.M")
-}
-
-func CheckExec(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- call := node.(*ast.CallExpr)
- if !code.IsCallToAST(pass, call, "os/exec.Command") {
- return
- }
- val, ok := code.ExprToString(pass, call.Args[Arg("os/exec.Command.name")])
- if !ok {
- return
- }
- if !strings.Contains(val, " ") || strings.Contains(val, `\`) || strings.Contains(val, "/") {
- return
- }
- report.Report(pass, call.Args[Arg("os/exec.Command.name")],
- "first argument to exec.Command looks like a shell command, but a program name or path are expected")
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-func CheckLoopEmptyDefault(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- loop := node.(*ast.ForStmt)
- if len(loop.Body.List) != 1 || loop.Cond != nil || loop.Init != nil {
- return
- }
- sel, ok := loop.Body.List[0].(*ast.SelectStmt)
- if !ok {
- return
- }
- for _, c := range sel.Body.List {
- // FIXME this leaves behind an empty line, and possibly
- // comments in the default branch. We can't easily fix
- // either.
- if comm, ok := c.(*ast.CommClause); ok && comm.Comm == nil && len(comm.Body) == 0 {
- report.Report(pass, comm, "should not have an empty default case in a for+select loop; the loop will spin",
- report.Fixes(edit.Fix("remove empty default branch", edit.Delete(comm))))
- // there can only be one default case
- break
- }
- }
- }
- code.Preorder(pass, fn, (*ast.ForStmt)(nil))
- return nil, nil
-}
-
-func CheckLhsRhsIdentical(pass *analysis.Pass) (interface{}, error) {
- var isFloat func(T types.Type) bool
- isFloat = func(T types.Type) bool {
- switch T := T.Underlying().(type) {
- case *types.Basic:
- kind := T.Kind()
- return kind == types.Float32 || kind == types.Float64
- case *types.Array:
- return isFloat(T.Elem())
- case *types.Struct:
- for i := 0; i < T.NumFields(); i++ {
- if !isFloat(T.Field(i).Type()) {
- return false
- }
- }
- return true
- default:
- return false
- }
- }
-
- // TODO(dh): this check ignores the existence of side-effects and
- // happily flags fn() == fn() – so far, we've had nobody complain
- // about a false positive, and it's caught several bugs in real
- // code.
- fn := func(node ast.Node) {
- op := node.(*ast.BinaryExpr)
- switch op.Op {
- case token.EQL, token.NEQ:
- if isFloat(pass.TypesInfo.TypeOf(op.X)) {
- // f == f and f != f might be used to check for NaN
- return
- }
- case token.SUB, token.QUO, token.AND, token.REM, token.OR, token.XOR, token.AND_NOT,
- token.LAND, token.LOR, token.LSS, token.GTR, token.LEQ, token.GEQ:
- default:
- // For some ops, such as + and *, it can make sense to
- // have identical operands
- return
- }
-
- if reflect.TypeOf(op.X) != reflect.TypeOf(op.Y) {
- return
- }
- if report.Render(pass, op.X) != report.Render(pass, op.Y) {
- return
- }
- l1, ok1 := op.X.(*ast.BasicLit)
- l2, ok2 := op.Y.(*ast.BasicLit)
- if ok1 && ok2 && l1.Kind == token.INT && l2.Kind == l1.Kind && l1.Value == "0" && l2.Value == l1.Value && code.IsGenerated(pass, l1.Pos()) {
- // cgo generates the following function call:
- // _cgoCheckPointer(_cgoBase0, 0 == 0) – it uses 0 == 0
- // instead of true in case the user shadowed the
- // identifier. Ideally we'd restrict this exception to
- // calls of _cgoCheckPointer, but it's not worth the
- // hassle of keeping track of the stack.
- // are very rare to begin with, and we're mostly checking
- // for them to catch typos such as 1 == 1 where the user
- // meant to type i == 1. The odds of a false negative for
- // 0 == 0 are slim.
- return
- }
- report.Report(pass, op, fmt.Sprintf("identical expressions on the left and right side of the '%s' operator", op.Op))
- }
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
-
-func CheckScopedBreak(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- var body *ast.BlockStmt
- switch node := node.(type) {
- case *ast.ForStmt:
- body = node.Body
- case *ast.RangeStmt:
- body = node.Body
- default:
- ExhaustiveTypeSwitch(node)
- }
- for _, stmt := range body.List {
- var blocks [][]ast.Stmt
- switch stmt := stmt.(type) {
- case *ast.SwitchStmt:
- for _, c := range stmt.Body.List {
- blocks = append(blocks, c.(*ast.CaseClause).Body)
- }
- case *ast.SelectStmt:
- for _, c := range stmt.Body.List {
- blocks = append(blocks, c.(*ast.CommClause).Body)
- }
- default:
- continue
- }
-
- for _, body := range blocks {
- if len(body) == 0 {
- continue
- }
- lasts := []ast.Stmt{body[len(body)-1]}
- // TODO(dh): unfold all levels of nested block
- // statements, not just a single level if statement
- if ifs, ok := lasts[0].(*ast.IfStmt); ok {
- if len(ifs.Body.List) == 0 {
- continue
- }
- lasts[0] = ifs.Body.List[len(ifs.Body.List)-1]
-
- if block, ok := ifs.Else.(*ast.BlockStmt); ok {
- if len(block.List) != 0 {
- lasts = append(lasts, block.List[len(block.List)-1])
- }
- }
- }
- for _, last := range lasts {
- branch, ok := last.(*ast.BranchStmt)
- if !ok || branch.Tok != token.BREAK || branch.Label != nil {
- continue
- }
- report.Report(pass, branch, "ineffective break statement. Did you mean to break out of the outer loop?")
- }
- }
- }
- }
- code.Preorder(pass, fn, (*ast.ForStmt)(nil), (*ast.RangeStmt)(nil))
- return nil, nil
-}
-
-func CheckUnsafePrintf(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- call := node.(*ast.CallExpr)
- name := code.CallNameAST(pass, call)
- var arg int
-
- switch name {
- case "fmt.Printf", "fmt.Sprintf", "log.Printf":
- arg = Arg("fmt.Printf.format")
- case "fmt.Fprintf":
- arg = Arg("fmt.Fprintf.format")
- default:
- return
- }
- if len(call.Args) != arg+1 {
- return
- }
- switch call.Args[arg].(type) {
- case *ast.CallExpr, *ast.Ident:
- default:
- return
- }
-
- alt := name[:len(name)-1]
- report.Report(pass, call,
- "printf-style function with dynamic format string and no further arguments should use print-style function instead",
- report.Fixes(edit.Fix(fmt.Sprintf("use %s instead of %s", alt, name), edit.ReplaceWithString(pass.Fset, call.Fun, alt))))
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-func CheckEarlyDefer(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- block := node.(*ast.BlockStmt)
- if len(block.List) < 2 {
- return
- }
- for i, stmt := range block.List {
- if i == len(block.List)-1 {
- break
- }
- assign, ok := stmt.(*ast.AssignStmt)
- if !ok {
- continue
- }
- if len(assign.Rhs) != 1 {
- continue
- }
- if len(assign.Lhs) < 2 {
- continue
- }
- if lhs, ok := assign.Lhs[len(assign.Lhs)-1].(*ast.Ident); ok && lhs.Name == "_" {
- continue
- }
- call, ok := assign.Rhs[0].(*ast.CallExpr)
- if !ok {
- continue
- }
- sig, ok := pass.TypesInfo.TypeOf(call.Fun).(*types.Signature)
- if !ok {
- continue
- }
- if sig.Results().Len() < 2 {
- continue
- }
- last := sig.Results().At(sig.Results().Len() - 1)
- // FIXME(dh): check that it's error from universe, not
- // another type of the same name
- if last.Type().String() != "error" {
- continue
- }
- lhs, ok := assign.Lhs[0].(*ast.Ident)
- if !ok {
- continue
- }
- def, ok := block.List[i+1].(*ast.DeferStmt)
- if !ok {
- continue
- }
- sel, ok := def.Call.Fun.(*ast.SelectorExpr)
- if !ok {
- continue
- }
- ident, ok := selectorX(sel).(*ast.Ident)
- if !ok {
- continue
- }
- if ident.Obj != lhs.Obj {
- continue
- }
- if sel.Sel.Name != "Close" {
- continue
- }
- report.Report(pass, def, fmt.Sprintf("should check returned error before deferring %s", report.Render(pass, def.Call)))
- }
- }
- code.Preorder(pass, fn, (*ast.BlockStmt)(nil))
- return nil, nil
-}
-
-func selectorX(sel *ast.SelectorExpr) ast.Node {
- switch x := sel.X.(type) {
- case *ast.SelectorExpr:
- return selectorX(x)
- default:
- return x
- }
-}
-
-func CheckEmptyCriticalSection(pass *analysis.Pass) (interface{}, error) {
- if pass.Pkg.Path() == "sync_test" {
- // exception for the sync package's tests
- return nil, nil
- }
-
- // Initially it might seem like this check would be easier to
- // implement using IR. After all, we're only checking for two
- // consecutive method calls. In reality, however, there may be any
- // number of other instructions between the lock and unlock, while
- // still constituting an empty critical section. For example,
- // given `m.x().Lock(); m.x().Unlock()`, there will be a call to
- // x(). In the AST-based approach, this has a tiny potential for a
- // false positive (the second call to x might be doing work that
- // is protected by the mutex). In an IR-based approach, however,
- // it would miss a lot of real bugs.
-
- mutexParams := func(s ast.Stmt) (x ast.Expr, funcName string, ok bool) {
- expr, ok := s.(*ast.ExprStmt)
- if !ok {
- return nil, "", false
- }
- call, ok := expr.X.(*ast.CallExpr)
- if !ok {
- return nil, "", false
- }
- sel, ok := call.Fun.(*ast.SelectorExpr)
- if !ok {
- return nil, "", false
- }
-
- fn, ok := pass.TypesInfo.ObjectOf(sel.Sel).(*types.Func)
- if !ok {
- return nil, "", false
- }
- sig := fn.Type().(*types.Signature)
- if sig.Params().Len() != 0 || sig.Results().Len() != 0 {
- return nil, "", false
- }
-
- return sel.X, fn.Name(), true
- }
-
- fn := func(node ast.Node) {
- block := node.(*ast.BlockStmt)
- if len(block.List) < 2 {
- return
- }
- for i := range block.List[:len(block.List)-1] {
- sel1, method1, ok1 := mutexParams(block.List[i])
- sel2, method2, ok2 := mutexParams(block.List[i+1])
-
- if !ok1 || !ok2 || report.Render(pass, sel1) != report.Render(pass, sel2) {
- continue
- }
- if (method1 == "Lock" && method2 == "Unlock") ||
- (method1 == "RLock" && method2 == "RUnlock") {
- report.Report(pass, block.List[i+1], "empty critical section")
- }
- }
- }
- code.Preorder(pass, fn, (*ast.BlockStmt)(nil))
- return nil, nil
-}
-
-var (
- // cgo produces code like fn(&*_Cvar_kSomeCallbacks) which we don't
- // want to flag.
- cgoIdent = regexp.MustCompile(`^_C(func|var)_.+$`)
- checkIneffectiveCopyQ1 = pattern.MustParse(`(UnaryExpr "&" (StarExpr obj))`)
- checkIneffectiveCopyQ2 = pattern.MustParse(`(StarExpr (UnaryExpr "&" _))`)
-)
-
-func CheckIneffectiveCopy(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if m, ok := Match(pass, checkIneffectiveCopyQ1, node); ok {
- if ident, ok := m.State["obj"].(*ast.Ident); !ok || !cgoIdent.MatchString(ident.Name) {
- report.Report(pass, node, "&*x will be simplified to x. It will not copy x.")
- }
- } else if _, ok := Match(pass, checkIneffectiveCopyQ2, node); ok {
- report.Report(pass, node, "*&x will be simplified to x. It will not copy x.")
- }
- }
- code.Preorder(pass, fn, (*ast.UnaryExpr)(nil), (*ast.StarExpr)(nil))
- return nil, nil
-}
-
-func CheckCanonicalHeaderKey(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node, push bool) bool {
- if !push {
- return false
- }
- assign, ok := node.(*ast.AssignStmt)
- if ok {
- // TODO(dh): This risks missing some Header reads, for
- // example in `h1["foo"] = h2["foo"]` – these edge
- // cases are probably rare enough to ignore for now.
- for _, expr := range assign.Lhs {
- op, ok := expr.(*ast.IndexExpr)
- if !ok {
- continue
- }
- if code.IsOfType(pass, op.X, "net/http.Header") {
- return false
- }
- }
- return true
- }
- op, ok := node.(*ast.IndexExpr)
- if !ok {
- return true
- }
- if !code.IsOfType(pass, op.X, "net/http.Header") {
- return true
- }
- s, ok := code.ExprToString(pass, op.Index)
- if !ok {
- return true
- }
- canonical := http.CanonicalHeaderKey(s)
- if s == canonical {
- return true
- }
- var fix analysis.SuggestedFix
- switch op.Index.(type) {
- case *ast.BasicLit:
- fix = edit.Fix("canonicalize header key", edit.ReplaceWithString(pass.Fset, op.Index, strconv.Quote(canonical)))
- case *ast.Ident:
- call := &ast.CallExpr{
- Fun: Selector("http", "CanonicalHeaderKey"),
- Args: []ast.Expr{op.Index},
- }
- fix = edit.Fix("wrap in http.CanonicalHeaderKey", edit.ReplaceWithNode(pass.Fset, op.Index, call))
- }
- msg := fmt.Sprintf("keys in http.Header are canonicalized, %q is not canonical; fix the constant or use http.CanonicalHeaderKey", s)
- if fix.Message != "" {
- report.Report(pass, op, msg, report.Fixes(fix))
- } else {
- report.Report(pass, op, msg)
- }
- return true
- }
- pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Nodes([]ast.Node{(*ast.AssignStmt)(nil), (*ast.IndexExpr)(nil)}, fn)
- return nil, nil
-}
-
-func CheckBenchmarkN(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- assign := node.(*ast.AssignStmt)
- if len(assign.Lhs) != 1 || len(assign.Rhs) != 1 {
- return
- }
- sel, ok := assign.Lhs[0].(*ast.SelectorExpr)
- if !ok {
- return
- }
- if sel.Sel.Name != "N" {
- return
- }
- if !code.IsOfType(pass, sel.X, "*testing.B") {
- return
- }
- report.Report(pass, assign, fmt.Sprintf("should not assign to %s", report.Render(pass, sel)))
- }
- code.Preorder(pass, fn, (*ast.AssignStmt)(nil))
- return nil, nil
-}
-
-func CheckUnreadVariableValues(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- if code.IsExample(fn) {
- continue
- }
- node := fn.Source()
- if node == nil {
- continue
- }
- if gen, ok := code.Generator(pass, node.Pos()); ok && gen == facts.Goyacc {
- // Don't flag unused values in code generated by goyacc.
- // There may be hundreds of those due to the way the state
- // machine is constructed.
- continue
- }
-
- switchTags := map[ir.Value]struct{}{}
- ast.Inspect(node, func(node ast.Node) bool {
- s, ok := node.(*ast.SwitchStmt)
- if !ok {
- return true
- }
- v, _ := fn.ValueForExpr(s.Tag)
- switchTags[v] = struct{}{}
- return true
- })
-
- // OPT(dh): don't use a map, possibly use a bitset
- var hasUse func(v ir.Value, seen map[ir.Value]struct{}) bool
- hasUse = func(v ir.Value, seen map[ir.Value]struct{}) bool {
- if _, ok := seen[v]; ok {
- return false
- }
- if _, ok := switchTags[v]; ok {
- return true
- }
- refs := v.Referrers()
- if refs == nil {
- // TODO investigate why refs can be nil
- return true
- }
- for _, ref := range *refs {
- switch ref := ref.(type) {
- case *ir.DebugRef:
- case *ir.Sigma:
- if seen == nil {
- seen = map[ir.Value]struct{}{}
- }
- seen[v] = struct{}{}
- if hasUse(ref, seen) {
- return true
- }
- case *ir.Phi:
- if seen == nil {
- seen = map[ir.Value]struct{}{}
- }
- seen[v] = struct{}{}
- if hasUse(ref, seen) {
- return true
- }
- default:
- return true
- }
- }
- return false
- }
-
- ast.Inspect(node, func(node ast.Node) bool {
- assign, ok := node.(*ast.AssignStmt)
- if !ok {
- return true
- }
- if len(assign.Lhs) > 1 && len(assign.Rhs) == 1 {
- // Either a function call with multiple return values,
- // or a comma-ok assignment
-
- val, _ := fn.ValueForExpr(assign.Rhs[0])
- if val == nil {
- return true
- }
- refs := val.Referrers()
- if refs == nil {
- return true
- }
- for _, ref := range *refs {
- ex, ok := ref.(*ir.Extract)
- if !ok {
- continue
- }
- if !hasUse(ex, nil) {
- lhs := assign.Lhs[ex.Index]
- if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" {
- continue
- }
- report.Report(pass, assign, fmt.Sprintf("this value of %s is never used", lhs))
- }
- }
- return true
- }
- for i, lhs := range assign.Lhs {
- rhs := assign.Rhs[i]
- if ident, ok := lhs.(*ast.Ident); !ok || ok && ident.Name == "_" {
- continue
- }
- val, _ := fn.ValueForExpr(rhs)
- if val == nil {
- continue
- }
-
- if !hasUse(val, nil) {
- report.Report(pass, assign, fmt.Sprintf("this value of %s is never used", lhs))
- }
- }
- return true
- })
- }
- return nil, nil
-}
-
-func CheckPredeterminedBooleanExprs(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- binop, ok := ins.(*ir.BinOp)
- if !ok {
- continue
- }
- switch binop.Op {
- case token.GTR, token.LSS, token.EQL, token.NEQ, token.LEQ, token.GEQ:
- default:
- continue
- }
-
- xs, ok1 := consts(binop.X, nil, nil)
- ys, ok2 := consts(binop.Y, nil, nil)
- if !ok1 || !ok2 || len(xs) == 0 || len(ys) == 0 {
- continue
- }
-
- trues := 0
- for _, x := range xs {
- for _, y := range ys {
- if x.Value == nil {
- if y.Value == nil {
- trues++
- }
- continue
- }
- if constant.Compare(x.Value, binop.Op, y.Value) {
- trues++
- }
- }
- }
- b := trues != 0
- if trues == 0 || trues == len(xs)*len(ys) {
- report.Report(pass, binop, fmt.Sprintf("binary expression is always %t for all possible values (%s %s %s)", b, xs, binop.Op, ys))
- }
- }
- }
- }
- return nil, nil
-}
-
-func CheckNilMaps(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- mu, ok := ins.(*ir.MapUpdate)
- if !ok {
- continue
- }
- c, ok := mu.Map.(*ir.Const)
- if !ok {
- continue
- }
- if c.Value != nil {
- continue
- }
- report.Report(pass, mu, "assignment to nil map")
- }
- }
- }
- return nil, nil
-}
-
-func CheckExtremeComparison(pass *analysis.Pass) (interface{}, error) {
- isobj := func(expr ast.Expr, name string) bool {
- sel, ok := expr.(*ast.SelectorExpr)
- if !ok {
- return false
- }
- return code.IsObject(pass.TypesInfo.ObjectOf(sel.Sel), name)
- }
-
- fn := func(node ast.Node) {
- expr := node.(*ast.BinaryExpr)
- tx := pass.TypesInfo.TypeOf(expr.X)
- basic, ok := tx.Underlying().(*types.Basic)
- if !ok {
- return
- }
-
- var max string
- var min string
-
- switch basic.Kind() {
- case types.Uint8:
- max = "math.MaxUint8"
- case types.Uint16:
- max = "math.MaxUint16"
- case types.Uint32:
- max = "math.MaxUint32"
- case types.Uint64:
- max = "math.MaxUint64"
- case types.Uint:
- max = "math.MaxUint64"
-
- case types.Int8:
- min = "math.MinInt8"
- max = "math.MaxInt8"
- case types.Int16:
- min = "math.MinInt16"
- max = "math.MaxInt16"
- case types.Int32:
- min = "math.MinInt32"
- max = "math.MaxInt32"
- case types.Int64:
- min = "math.MinInt64"
- max = "math.MaxInt64"
- case types.Int:
- min = "math.MinInt64"
- max = "math.MaxInt64"
- }
-
- if (expr.Op == token.GTR || expr.Op == token.GEQ) && isobj(expr.Y, max) ||
- (expr.Op == token.LSS || expr.Op == token.LEQ) && isobj(expr.X, max) {
- report.Report(pass, expr, fmt.Sprintf("no value of type %s is greater than %s", basic, max))
- }
- if expr.Op == token.LEQ && isobj(expr.Y, max) ||
- expr.Op == token.GEQ && isobj(expr.X, max) {
- report.Report(pass, expr, fmt.Sprintf("every value of type %s is <= %s", basic, max))
- }
-
- if (basic.Info() & types.IsUnsigned) != 0 {
- if (expr.Op == token.LSS && code.IsIntLiteral(expr.Y, "0")) ||
- (expr.Op == token.GTR && code.IsIntLiteral(expr.X, "0")) {
- report.Report(pass, expr, fmt.Sprintf("no value of type %s is less than 0", basic))
- }
- if expr.Op == token.GEQ && code.IsIntLiteral(expr.Y, "0") ||
- expr.Op == token.LEQ && code.IsIntLiteral(expr.X, "0") {
- report.Report(pass, expr, fmt.Sprintf("every value of type %s is >= 0", basic))
- }
- } else {
- if (expr.Op == token.LSS || expr.Op == token.LEQ) && isobj(expr.Y, min) ||
- (expr.Op == token.GTR || expr.Op == token.GEQ) && isobj(expr.X, min) {
- report.Report(pass, expr, fmt.Sprintf("no value of type %s is less than %s", basic, min))
- }
- if expr.Op == token.GEQ && isobj(expr.Y, min) ||
- expr.Op == token.LEQ && isobj(expr.X, min) {
- report.Report(pass, expr, fmt.Sprintf("every value of type %s is >= %s", basic, min))
- }
- }
-
- }
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
-
-func consts(val ir.Value, out []*ir.Const, visitedPhis map[string]bool) ([]*ir.Const, bool) {
- if visitedPhis == nil {
- visitedPhis = map[string]bool{}
- }
- var ok bool
- switch val := val.(type) {
- case *ir.Phi:
- if visitedPhis[val.Name()] {
- break
- }
- visitedPhis[val.Name()] = true
- vals := val.Operands(nil)
- for _, phival := range vals {
- out, ok = consts(*phival, out, visitedPhis)
- if !ok {
- return nil, false
- }
- }
- case *ir.Const:
- out = append(out, val)
- case *ir.Convert:
- out, ok = consts(val.X, out, visitedPhis)
- if !ok {
- return nil, false
- }
- default:
- return nil, false
- }
- if len(out) < 2 {
- return out, true
- }
- uniq := []*ir.Const{out[0]}
- for _, val := range out[1:] {
- if val.Value == uniq[len(uniq)-1].Value {
- continue
- }
- uniq = append(uniq, val)
- }
- return uniq, true
-}
-
-func CheckLoopCondition(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- cb := func(node ast.Node) bool {
- loop, ok := node.(*ast.ForStmt)
- if !ok {
- return true
- }
- if loop.Init == nil || loop.Cond == nil || loop.Post == nil {
- return true
- }
- init, ok := loop.Init.(*ast.AssignStmt)
- if !ok || len(init.Lhs) != 1 || len(init.Rhs) != 1 {
- return true
- }
- cond, ok := loop.Cond.(*ast.BinaryExpr)
- if !ok {
- return true
- }
- x, ok := cond.X.(*ast.Ident)
- if !ok {
- return true
- }
- lhs, ok := init.Lhs[0].(*ast.Ident)
- if !ok {
- return true
- }
- if x.Obj != lhs.Obj {
- return true
- }
- if _, ok := loop.Post.(*ast.IncDecStmt); !ok {
- return true
- }
-
- v, isAddr := fn.ValueForExpr(cond.X)
- if v == nil || isAddr {
- return true
- }
- switch v := v.(type) {
- case *ir.Phi:
- ops := v.Operands(nil)
- if len(ops) != 2 {
- return true
- }
- _, ok := (*ops[0]).(*ir.Const)
- if !ok {
- return true
- }
- sigma, ok := (*ops[1]).(*ir.Sigma)
- if !ok {
- return true
- }
- if sigma.X != v {
- return true
- }
- case *ir.Load:
- return true
- }
- report.Report(pass, cond, "variable in loop condition never changes")
-
- return true
- }
- Inspect(fn.Source(), cb)
- }
- return nil, nil
-}
-
-func CheckArgOverwritten(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- cb := func(node ast.Node) bool {
- var typ *ast.FuncType
- var body *ast.BlockStmt
- switch fn := node.(type) {
- case *ast.FuncDecl:
- typ = fn.Type
- body = fn.Body
- case *ast.FuncLit:
- typ = fn.Type
- body = fn.Body
- }
- if body == nil {
- return true
- }
- if len(typ.Params.List) == 0 {
- return true
- }
- for _, field := range typ.Params.List {
- for _, arg := range field.Names {
- obj := pass.TypesInfo.ObjectOf(arg)
- var irobj *ir.Parameter
- for _, param := range fn.Params {
- if param.Object() == obj {
- irobj = param
- break
- }
- }
- if irobj == nil {
- continue
- }
- refs := irobj.Referrers()
- if refs == nil {
- continue
- }
- if len(code.FilterDebug(*refs)) != 0 {
- continue
- }
-
- var assignment ast.Node
- ast.Inspect(body, func(node ast.Node) bool {
- if assignment != nil {
- return false
- }
- assign, ok := node.(*ast.AssignStmt)
- if !ok {
- return true
- }
- for _, lhs := range assign.Lhs {
- ident, ok := lhs.(*ast.Ident)
- if !ok {
- continue
- }
- if pass.TypesInfo.ObjectOf(ident) == obj {
- assignment = assign
- return false
- }
- }
- return true
- })
- if assignment != nil {
- report.Report(pass, arg, fmt.Sprintf("argument %s is overwritten before first use", arg),
- report.Related(assignment, fmt.Sprintf("assignment to %s", arg)))
- }
- }
- }
- return true
- }
- Inspect(fn.Source(), cb)
- }
- return nil, nil
-}
-
-func CheckIneffectiveLoop(pass *analysis.Pass) (interface{}, error) {
- // This check detects some, but not all unconditional loop exits.
- // We give up in the following cases:
- //
- // - a goto anywhere in the loop. The goto might skip over our
- // return, and we don't check that it doesn't.
- //
- // - any nested, unlabelled continue, even if it is in another
- // loop or closure.
- fn := func(node ast.Node) {
- var body *ast.BlockStmt
- switch fn := node.(type) {
- case *ast.FuncDecl:
- body = fn.Body
- case *ast.FuncLit:
- body = fn.Body
- default:
- ExhaustiveTypeSwitch(node)
- }
- if body == nil {
- return
- }
- labels := map[*ast.Object]ast.Stmt{}
- ast.Inspect(body, func(node ast.Node) bool {
- label, ok := node.(*ast.LabeledStmt)
- if !ok {
- return true
- }
- labels[label.Label.Obj] = label.Stmt
- return true
- })
-
- ast.Inspect(body, func(node ast.Node) bool {
- var loop ast.Node
- var body *ast.BlockStmt
- switch node := node.(type) {
- case *ast.ForStmt:
- body = node.Body
- loop = node
- case *ast.RangeStmt:
- typ := pass.TypesInfo.TypeOf(node.X)
- if _, ok := typ.Underlying().(*types.Map); ok {
- // looping once over a map is a valid pattern for
- // getting an arbitrary element.
- return true
- }
- body = node.Body
- loop = node
- default:
- return true
- }
- if len(body.List) < 2 {
- // avoid flagging the somewhat common pattern of using
- // a range loop to get the first element in a slice,
- // or the first rune in a string.
- return true
- }
- var unconditionalExit ast.Node
- hasBranching := false
- for _, stmt := range body.List {
- switch stmt := stmt.(type) {
- case *ast.BranchStmt:
- switch stmt.Tok {
- case token.BREAK:
- if stmt.Label == nil || labels[stmt.Label.Obj] == loop {
- unconditionalExit = stmt
- }
- case token.CONTINUE:
- if stmt.Label == nil || labels[stmt.Label.Obj] == loop {
- unconditionalExit = nil
- return false
- }
- }
- case *ast.ReturnStmt:
- unconditionalExit = stmt
- case *ast.IfStmt, *ast.ForStmt, *ast.RangeStmt, *ast.SwitchStmt, *ast.SelectStmt:
- hasBranching = true
- }
- }
- if unconditionalExit == nil || !hasBranching {
- return false
- }
- ast.Inspect(body, func(node ast.Node) bool {
- if branch, ok := node.(*ast.BranchStmt); ok {
-
- switch branch.Tok {
- case token.GOTO:
- unconditionalExit = nil
- return false
- case token.CONTINUE:
- if branch.Label != nil && labels[branch.Label.Obj] != loop {
- return true
- }
- unconditionalExit = nil
- return false
- }
- }
- return true
- })
- if unconditionalExit != nil {
- report.Report(pass, unconditionalExit, "the surrounding loop is unconditionally terminated")
- }
- return true
- })
- }
- code.Preorder(pass, fn, (*ast.FuncDecl)(nil), (*ast.FuncLit)(nil))
- return nil, nil
-}
-
-var checkNilContextQ = pattern.MustParse(`(CallExpr fun@(Function _) (Builtin "nil"):_)`)
-
-func CheckNilContext(pass *analysis.Pass) (interface{}, error) {
- todo := &ast.CallExpr{
- Fun: Selector("context", "TODO"),
- }
- bg := &ast.CallExpr{
- Fun: Selector("context", "Background"),
- }
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkNilContextQ, node)
- if !ok {
- return
- }
-
- call := node.(*ast.CallExpr)
- fun, ok := m.State["fun"].(*types.Func)
- if !ok {
- // it might also be a builtin
- return
- }
- sig := fun.Type().(*types.Signature)
- if sig.Params().Len() == 0 {
- // Our CallExpr might've matched a method expression, like
- // (*T).Foo(nil) – here, nil isn't the first argument of
- // the Foo method, but the method receiver.
- return
- }
- if !code.IsType(sig.Params().At(0).Type(), "context.Context") {
- return
- }
- report.Report(pass, call.Args[0],
- "do not pass a nil Context, even if a function permits it; pass context.TODO if you are unsure about which Context to use", report.Fixes(
- edit.Fix("use context.TODO", edit.ReplaceWithNode(pass.Fset, call.Args[0], todo)),
- edit.Fix("use context.Background", edit.ReplaceWithNode(pass.Fset, call.Args[0], bg))))
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-var (
- checkSeekerQ = pattern.MustParse(`(CallExpr fun@(SelectorExpr _ (Ident "Seek")) [arg1@(SelectorExpr (Ident "io") (Ident (Or "SeekStart" "SeekCurrent" "SeekEnd"))) arg2])`)
- checkSeekerR = pattern.MustParse(`(CallExpr fun [arg2 arg1])`)
-)
-
-func CheckSeeker(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if _, edits, ok := MatchAndEdit(pass, checkSeekerQ, checkSeekerR, node); ok {
- report.Report(pass, node, "the first argument of io.Seeker is the offset, but an io.Seek* constant is being used instead",
- report.Fixes(edit.Fix("swap arguments", edits...)))
- }
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-func CheckIneffectiveAppend(pass *analysis.Pass) (interface{}, error) {
- isAppend := func(ins ir.Value) bool {
- call, ok := ins.(*ir.Call)
- if !ok {
- return false
- }
- if call.Call.IsInvoke() {
- return false
- }
- if builtin, ok := call.Call.Value.(*ir.Builtin); !ok || builtin.Name() != "append" {
- return false
- }
- return true
- }
-
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- val, ok := ins.(ir.Value)
- if !ok || !isAppend(val) {
- continue
- }
-
- isUsed := false
- visited := map[ir.Instruction]bool{}
- var walkRefs func(refs []ir.Instruction)
- walkRefs = func(refs []ir.Instruction) {
- loop:
- for _, ref := range refs {
- if visited[ref] {
- continue
- }
- visited[ref] = true
- if _, ok := ref.(*ir.DebugRef); ok {
- continue
- }
- switch ref := ref.(type) {
- case *ir.Phi:
- walkRefs(*ref.Referrers())
- case *ir.Sigma:
- walkRefs(*ref.Referrers())
- case ir.Value:
- if !isAppend(ref) {
- isUsed = true
- } else {
- walkRefs(*ref.Referrers())
- }
- case ir.Instruction:
- isUsed = true
- break loop
- }
- }
- }
-
- refs := val.Referrers()
- if refs == nil {
- continue
- }
- walkRefs(*refs)
-
- if !isUsed {
- report.Report(pass, ins, "this result of append is never used, except maybe in other appends")
- }
- }
- }
- }
- return nil, nil
-}
-
-func CheckConcurrentTesting(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- gostmt, ok := ins.(*ir.Go)
- if !ok {
- continue
- }
- var fn *ir.Function
- switch val := gostmt.Call.Value.(type) {
- case *ir.Function:
- fn = val
- case *ir.MakeClosure:
- fn = val.Fn.(*ir.Function)
- default:
- continue
- }
- if fn.Blocks == nil {
- continue
- }
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- call, ok := ins.(*ir.Call)
- if !ok {
- continue
- }
- if call.Call.IsInvoke() {
- continue
- }
- callee := call.Call.StaticCallee()
- if callee == nil {
- continue
- }
- recv := callee.Signature.Recv()
- if recv == nil {
- continue
- }
- if !code.IsType(recv.Type(), "*testing.common") {
- continue
- }
- fn, ok := call.Call.StaticCallee().Object().(*types.Func)
- if !ok {
- continue
- }
- name := fn.Name()
- switch name {
- case "FailNow", "Fatal", "Fatalf", "SkipNow", "Skip", "Skipf":
- default:
- continue
- }
- // TODO(dh): don't report multiple diagnostics
- // for multiple calls to T.Fatal, but do
- // collect all of them as related information
- report.Report(pass, gostmt, fmt.Sprintf("the goroutine calls T.%s, which must be called in the same goroutine as the test", name),
- report.Related(call, fmt.Sprintf("call to T.%s", name)))
- }
- }
- }
- }
- }
- return nil, nil
-}
-
-func eachCall(fn *ir.Function, cb func(caller *ir.Function, site ir.CallInstruction, callee *ir.Function)) {
- for _, b := range fn.Blocks {
- for _, instr := range b.Instrs {
- if site, ok := instr.(ir.CallInstruction); ok {
- if g := site.Common().StaticCallee(); g != nil {
- cb(fn, site, g)
- }
- }
- }
- }
-}
-
-func CheckCyclicFinalizer(pass *analysis.Pass) (interface{}, error) {
- cb := func(caller *ir.Function, site ir.CallInstruction, callee *ir.Function) {
- if callee.RelString(nil) != "runtime.SetFinalizer" {
- return
- }
- arg0 := site.Common().Args[Arg("runtime.SetFinalizer.obj")]
- if iface, ok := arg0.(*ir.MakeInterface); ok {
- arg0 = iface.X
- }
- load, ok := arg0.(*ir.Load)
- if !ok {
- return
- }
- v, ok := load.X.(*ir.Alloc)
- if !ok {
- return
- }
- arg1 := site.Common().Args[Arg("runtime.SetFinalizer.finalizer")]
- if iface, ok := arg1.(*ir.MakeInterface); ok {
- arg1 = iface.X
- }
- mc, ok := arg1.(*ir.MakeClosure)
- if !ok {
- return
- }
- for _, b := range mc.Bindings {
- if b == v {
- pos := lint.DisplayPosition(pass.Fset, mc.Fn.Pos())
- report.Report(pass, site, fmt.Sprintf("the finalizer closes over the object, preventing the finalizer from ever running (at %s)", pos))
- }
- }
- }
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- eachCall(fn, cb)
- }
- return nil, nil
-}
-
-/*
-func CheckSliceOutOfBounds(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- ia, ok := ins.(*ir.IndexAddr)
- if !ok {
- continue
- }
- if _, ok := ia.X.Type().Underlying().(*types.Slice); !ok {
- continue
- }
- sr, ok1 := c.funcDescs.Get(fn).Ranges[ia.X].(vrp.SliceInterval)
- idxr, ok2 := c.funcDescs.Get(fn).Ranges[ia.Index].(vrp.IntInterval)
- if !ok1 || !ok2 || !sr.IsKnown() || !idxr.IsKnown() || sr.Length.Empty() || idxr.Empty() {
- continue
- }
- if idxr.Lower.Cmp(sr.Length.Upper) >= 0 {
- report.Nodef(pass, ia, "index out of bounds")
- }
- }
- }
- }
- return nil, nil
-}
-*/
-
-func CheckDeferLock(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, block := range fn.Blocks {
- instrs := code.FilterDebug(block.Instrs)
- if len(instrs) < 2 {
- continue
- }
- for i, ins := range instrs[:len(instrs)-1] {
- call, ok := ins.(*ir.Call)
- if !ok {
- continue
- }
- if !code.IsCallToAny(call.Common(), "(*sync.Mutex).Lock", "(*sync.RWMutex).RLock") {
- continue
- }
- nins, ok := instrs[i+1].(*ir.Defer)
- if !ok {
- continue
- }
- if !code.IsCallToAny(&nins.Call, "(*sync.Mutex).Lock", "(*sync.RWMutex).RLock") {
- continue
- }
- if call.Common().Args[0] != nins.Call.Args[0] {
- continue
- }
- name := shortCallName(call.Common())
- alt := ""
- switch name {
- case "Lock":
- alt = "Unlock"
- case "RLock":
- alt = "RUnlock"
- }
- report.Report(pass, nins, fmt.Sprintf("deferring %s right after having locked already; did you mean to defer %s?", name, alt))
- }
- }
- }
- return nil, nil
-}
-
-func CheckNaNComparison(pass *analysis.Pass) (interface{}, error) {
- isNaN := func(v ir.Value) bool {
- call, ok := v.(*ir.Call)
- if !ok {
- return false
- }
- return code.IsCallTo(call.Common(), "math.NaN")
- }
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- ins, ok := ins.(*ir.BinOp)
- if !ok {
- continue
- }
- if isNaN(ins.X) || isNaN(ins.Y) {
- report.Report(pass, ins, "no value is equal to NaN, not even NaN itself")
- }
- }
- }
- }
- return nil, nil
-}
-
-func CheckInfiniteRecursion(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- eachCall(fn, func(caller *ir.Function, site ir.CallInstruction, callee *ir.Function) {
- if callee != fn {
- return
- }
- if _, ok := site.(*ir.Go); ok {
- // Recursively spawning goroutines doesn't consume
- // stack space infinitely, so don't flag it.
- return
- }
-
- block := site.Block()
- canReturn := false
- for _, b := range fn.Blocks {
- if block.Dominates(b) {
- continue
- }
- if len(b.Instrs) == 0 {
- continue
- }
- if _, ok := b.Control().(*ir.Return); ok {
- canReturn = true
- break
- }
- }
- if canReturn {
- return
- }
- report.Report(pass, site, "infinite recursive call")
- })
- }
- return nil, nil
-}
-
-func objectName(obj types.Object) string {
- if obj == nil {
- return ""
- }
- var name string
- if obj.Pkg() != nil && obj.Pkg().Scope().Lookup(obj.Name()) == obj {
- s := obj.Pkg().Path()
- if s != "" {
- name += s + "."
- }
- }
- name += obj.Name()
- return name
-}
-
-func isName(pass *analysis.Pass, expr ast.Expr, name string) bool {
- var obj types.Object
- switch expr := expr.(type) {
- case *ast.Ident:
- obj = pass.TypesInfo.ObjectOf(expr)
- case *ast.SelectorExpr:
- obj = pass.TypesInfo.ObjectOf(expr.Sel)
- }
- return objectName(obj) == name
-}
-
-func CheckLeakyTimeTick(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- if code.IsMainLike(pass) || code.IsInTest(pass, fn) {
- continue
- }
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- call, ok := ins.(*ir.Call)
- if !ok || !code.IsCallTo(call.Common(), "time.Tick") {
- continue
- }
- if !functions.Terminates(call.Parent()) {
- continue
- }
- report.Report(pass, call, "using time.Tick leaks the underlying ticker, consider using it only in endless functions, tests and the main package, and use time.NewTicker here")
- }
- }
- }
- return nil, nil
-}
-
-var checkDoubleNegationQ = pattern.MustParse(`(UnaryExpr "!" single@(UnaryExpr "!" x))`)
-
-func CheckDoubleNegation(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if m, ok := Match(pass, checkDoubleNegationQ, node); ok {
- report.Report(pass, node, "negating a boolean twice has no effect; is this a typo?", report.Fixes(
- edit.Fix("turn into single negation", edit.ReplaceWithNode(pass.Fset, node, m.State["single"].(ast.Node))),
- edit.Fix("remove double negation", edit.ReplaceWithNode(pass.Fset, node, m.State["x"].(ast.Node)))))
- }
- }
- code.Preorder(pass, fn, (*ast.UnaryExpr)(nil))
- return nil, nil
-}
-
-func CheckRepeatedIfElse(pass *analysis.Pass) (interface{}, error) {
- seen := map[ast.Node]bool{}
-
- var collectConds func(ifstmt *ast.IfStmt, conds []ast.Expr) ([]ast.Expr, bool)
- collectConds = func(ifstmt *ast.IfStmt, conds []ast.Expr) ([]ast.Expr, bool) {
- seen[ifstmt] = true
- // Bail if any if-statement has an Init statement or side effects in its condition
- if ifstmt.Init != nil {
- return nil, false
- }
- if code.MayHaveSideEffects(pass, ifstmt.Cond, nil) {
- return nil, false
- }
-
- conds = append(conds, ifstmt.Cond)
- if elsestmt, ok := ifstmt.Else.(*ast.IfStmt); ok {
- return collectConds(elsestmt, conds)
- }
- return conds, true
- }
- fn := func(node ast.Node) {
- ifstmt := node.(*ast.IfStmt)
- if seen[ifstmt] {
- // this if-statement is part of an if/else-if chain that we've already processed
- return
- }
- if ifstmt.Else == nil {
- // there can be at most one condition
- return
- }
- conds, ok := collectConds(ifstmt, nil)
- if !ok {
- return
- }
- if len(conds) < 2 {
- return
- }
- counts := map[string]int{}
- for _, cond := range conds {
- s := report.Render(pass, cond)
- counts[s]++
- if counts[s] == 2 {
- report.Report(pass, cond, "this condition occurs multiple times in this if/else if chain")
- }
- }
- }
- code.Preorder(pass, fn, (*ast.IfStmt)(nil))
- return nil, nil
-}
-
-func CheckSillyBitwiseOps(pass *analysis.Pass) (interface{}, error) {
- // FIXME(dh): what happened here?
- if false {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- ins, ok := ins.(*ir.BinOp)
- if !ok {
- continue
- }
-
- if c, ok := ins.Y.(*ir.Const); !ok || c.Value == nil || c.Value.Kind() != constant.Int || c.Uint64() != 0 {
- continue
- }
- switch ins.Op {
- case token.AND, token.OR, token.XOR:
- default:
- // we do not flag shifts because too often, x<<0 is part
- // of a pattern, x<<0, x<<8, x<<16, ...
- continue
- }
- path, _ := astutil.PathEnclosingInterval(code.File(pass, ins), ins.Pos(), ins.Pos())
- if len(path) == 0 {
- continue
- }
-
- if node, ok := path[0].(*ast.BinaryExpr); !ok || !code.IsIntLiteral(node.Y, "0") {
- continue
- }
-
- switch ins.Op {
- case token.AND:
- report.Report(pass, ins, "x & 0 always equals 0")
- case token.OR, token.XOR:
- report.Report(pass, ins, fmt.Sprintf("x %s 0 always equals x", ins.Op))
- }
- }
- }
- }
- }
- fn := func(node ast.Node) {
- binop := node.(*ast.BinaryExpr)
- b, ok := pass.TypesInfo.TypeOf(binop).Underlying().(*types.Basic)
- if !ok {
- return
- }
- if (b.Info() & types.IsInteger) == 0 {
- return
- }
- switch binop.Op {
- case token.AND, token.OR, token.XOR:
- default:
- // we do not flag shifts because too often, x<<0 is part
- // of a pattern, x<<0, x<<8, x<<16, ...
- return
- }
- switch y := binop.Y.(type) {
- case *ast.Ident:
- obj, ok := pass.TypesInfo.ObjectOf(y).(*types.Const)
- if !ok {
- return
- }
- if v, _ := constant.Int64Val(obj.Val()); v != 0 {
- return
- }
- path, _ := astutil.PathEnclosingInterval(code.File(pass, obj), obj.Pos(), obj.Pos())
- if len(path) < 2 {
- return
- }
- spec, ok := path[1].(*ast.ValueSpec)
- if !ok {
- return
- }
- if len(spec.Names) != 1 || len(spec.Values) != 1 {
- // TODO(dh): we could support this
- return
- }
- ident, ok := spec.Values[0].(*ast.Ident)
- if !ok {
- return
- }
- if !isIota(pass.TypesInfo.ObjectOf(ident)) {
- return
- }
- switch binop.Op {
- case token.AND:
- report.Report(pass, node,
- fmt.Sprintf("%s always equals 0; %s is defined as iota and has value 0, maybe %s is meant to be 1 << iota?", report.Render(pass, binop), report.Render(pass, binop.Y), report.Render(pass, binop.Y)))
- case token.OR, token.XOR:
- report.Report(pass, node,
- fmt.Sprintf("%s always equals %s; %s is defined as iota and has value 0, maybe %s is meant to be 1 << iota?", report.Render(pass, binop), report.Render(pass, binop.X), report.Render(pass, binop.Y), report.Render(pass, binop.Y)))
- }
- case *ast.BasicLit:
- if !code.IsIntLiteral(binop.Y, "0") {
- return
- }
- switch binop.Op {
- case token.AND:
- report.Report(pass, node, fmt.Sprintf("%s always equals 0", report.Render(pass, binop)))
- case token.OR, token.XOR:
- report.Report(pass, node, fmt.Sprintf("%s always equals %s", report.Render(pass, binop), report.Render(pass, binop.X)))
- }
- default:
- return
- }
- }
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
-
-func isIota(obj types.Object) bool {
- if obj.Name() != "iota" {
- return false
- }
- c, ok := obj.(*types.Const)
- if !ok {
- return false
- }
- return c.Pkg() == nil
-}
-
-func CheckNonOctalFileMode(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- call := node.(*ast.CallExpr)
- sig, ok := pass.TypesInfo.TypeOf(call.Fun).(*types.Signature)
- if !ok {
- return
- }
- n := sig.Params().Len()
- for i := 0; i < n; i++ {
- typ := sig.Params().At(i).Type()
- if !code.IsType(typ, "os.FileMode") {
- continue
- }
-
- lit, ok := call.Args[i].(*ast.BasicLit)
- if !ok {
- continue
- }
- if len(lit.Value) == 3 &&
- lit.Value[0] != '0' &&
- lit.Value[0] >= '0' && lit.Value[0] <= '7' &&
- lit.Value[1] >= '0' && lit.Value[1] <= '7' &&
- lit.Value[2] >= '0' && lit.Value[2] <= '7' {
-
- v, err := strconv.ParseInt(lit.Value, 10, 64)
- if err != nil {
- continue
- }
- report.Report(pass, call.Args[i], fmt.Sprintf("file mode '%s' evaluates to %#o; did you mean '0%s'?", lit.Value, v, lit.Value),
- report.Fixes(edit.Fix("fix octal literal", edit.ReplaceWithString(pass.Fset, call.Args[i], "0"+lit.Value))))
- }
- }
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-func CheckPureFunctions(pass *analysis.Pass) (interface{}, error) {
- pure := pass.ResultOf[facts.Purity].(facts.PurityResult)
-
-fnLoop:
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- if code.IsInTest(pass, fn) {
- params := fn.Signature.Params()
- for i := 0; i < params.Len(); i++ {
- param := params.At(i)
- if code.IsType(param.Type(), "*testing.B") {
- // Ignore discarded pure functions in code related
- // to benchmarks. Instead of matching BenchmarkFoo
- // functions, we match any function accepting a
- // *testing.B. Benchmarks sometimes call generic
- // functions for doing the actual work, and
- // checking for the parameter is a lot easier and
- // faster than analyzing call trees.
- continue fnLoop
- }
- }
- }
-
- for _, b := range fn.Blocks {
- for _, ins := range b.Instrs {
- ins, ok := ins.(*ir.Call)
- if !ok {
- continue
- }
- refs := ins.Referrers()
- if refs == nil || len(code.FilterDebug(*refs)) > 0 {
- continue
- }
-
- callee := ins.Common().StaticCallee()
- if callee == nil {
- continue
- }
- if callee.Object() == nil {
- // TODO(dh): support anonymous functions
- continue
- }
- if _, ok := pure[callee.Object().(*types.Func)]; ok {
- if pass.Pkg.Path() == "fmt_test" && callee.Object().(*types.Func).FullName() == "fmt.Sprintf" {
- // special case for benchmarks in the fmt package
- continue
- }
- report.Report(pass, ins, fmt.Sprintf("%s is a pure function but its return value is ignored", callee.Name()))
- }
- }
- }
- }
- return nil, nil
-}
-
-func CheckDeprecated(pass *analysis.Pass) (interface{}, error) {
- deprs := pass.ResultOf[facts.Deprecated].(facts.DeprecatedResult)
-
- // Selectors can appear outside of function literals, e.g. when
- // declaring package level variables.
-
- var tfn types.Object
- stack := 0
- fn := func(node ast.Node, push bool) bool {
- if !push {
- stack--
- return false
- }
- stack++
- if stack == 1 {
- tfn = nil
- }
- if fn, ok := node.(*ast.FuncDecl); ok {
- tfn = pass.TypesInfo.ObjectOf(fn.Name)
- }
- sel, ok := node.(*ast.SelectorExpr)
- if !ok {
- return true
- }
-
- obj := pass.TypesInfo.ObjectOf(sel.Sel)
- if obj.Pkg() == nil {
- return true
- }
- if pass.Pkg == obj.Pkg() || obj.Pkg().Path()+"_test" == pass.Pkg.Path() {
- // Don't flag stuff in our own package
- return true
- }
- if depr, ok := deprs.Objects[obj]; ok {
- // Look for the first available alternative, not the first
- // version something was deprecated in. If a function was
- // deprecated in Go 1.6, an alternative has been available
- // already in 1.0, and we're targeting 1.2, it still
- // makes sense to use the alternative from 1.0, to be
- // future-proof.
- minVersion := deprecated.Stdlib[code.SelectorName(pass, sel)].AlternativeAvailableSince
- if !code.IsGoVersion(pass, minVersion) {
- return true
- }
-
- if tfn != nil {
- if _, ok := deprs.Objects[tfn]; ok {
- // functions that are deprecated may use deprecated
- // symbols
- return true
- }
- }
- report.Report(pass, sel, fmt.Sprintf("%s is deprecated: %s", report.Render(pass, sel), depr.Msg))
- return true
- }
- return true
- }
-
- fn2 := func(node ast.Node) {
- spec := node.(*ast.ImportSpec)
- var imp *types.Package
- if spec.Name != nil {
- imp = pass.TypesInfo.ObjectOf(spec.Name).(*types.PkgName).Imported()
- } else {
- imp = pass.TypesInfo.Implicits[spec].(*types.PkgName).Imported()
- }
-
- p := spec.Path.Value
- path := p[1 : len(p)-1]
- if depr, ok := deprs.Packages[imp]; ok {
- if path == "github.com/golang/protobuf/proto" {
- gen, ok := code.Generator(pass, spec.Path.Pos())
- if ok && gen == facts.ProtocGenGo {
- return
- }
- }
- report.Report(pass, spec, fmt.Sprintf("package %s is deprecated: %s", path, depr.Msg))
- }
- }
- pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Nodes(nil, fn)
- code.Preorder(pass, fn2, (*ast.ImportSpec)(nil))
- return nil, nil
-}
-
-func callChecker(rules map[string]CallCheck) func(pass *analysis.Pass) (interface{}, error) {
- return func(pass *analysis.Pass) (interface{}, error) {
- return checkCalls(pass, rules)
- }
-}
-
-func checkCalls(pass *analysis.Pass, rules map[string]CallCheck) (interface{}, error) {
- cb := func(caller *ir.Function, site ir.CallInstruction, callee *ir.Function) {
- obj, ok := callee.Object().(*types.Func)
- if !ok {
- return
- }
-
- r, ok := rules[lint.FuncName(obj)]
- if !ok {
- return
- }
- var args []*Argument
- irargs := site.Common().Args
- if callee.Signature.Recv() != nil {
- irargs = irargs[1:]
- }
- for _, arg := range irargs {
- if iarg, ok := arg.(*ir.MakeInterface); ok {
- arg = iarg.X
- }
- args = append(args, &Argument{Value: Value{arg}})
- }
- call := &Call{
- Pass: pass,
- Instr: site,
- Args: args,
- Parent: site.Parent(),
- }
- r(call)
- path, _ := astutil.PathEnclosingInterval(code.File(pass, site), site.Pos(), site.Pos())
- var astcall *ast.CallExpr
- for _, el := range path {
- if expr, ok := el.(*ast.CallExpr); ok {
- astcall = expr
- break
- }
- }
- for idx, arg := range call.Args {
- for _, e := range arg.invalids {
- if astcall != nil {
- report.Report(pass, astcall.Args[idx], e)
- } else {
- report.Report(pass, site, e)
- }
- }
- }
- for _, e := range call.invalids {
- report.Report(pass, call.Instr, e)
- }
- }
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- eachCall(fn, cb)
- }
- return nil, nil
-}
-
-func shortCallName(call *ir.CallCommon) string {
- if call.IsInvoke() {
- return ""
- }
- switch v := call.Value.(type) {
- case *ir.Function:
- fn, ok := v.Object().(*types.Func)
- if !ok {
- return ""
- }
- return fn.Name()
- case *ir.Builtin:
- return v.Name()
- }
- return ""
-}
-
-func CheckWriterBufferModified(pass *analysis.Pass) (interface{}, error) {
- // TODO(dh): this might be a good candidate for taint analysis.
- // Taint the argument as MUST_NOT_MODIFY, then propagate that
- // through functions like bytes.Split
-
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- sig := fn.Signature
- if fn.Name() != "Write" || sig.Recv() == nil || sig.Params().Len() != 1 || sig.Results().Len() != 2 {
- continue
- }
- tArg, ok := sig.Params().At(0).Type().(*types.Slice)
- if !ok {
- continue
- }
- if basic, ok := tArg.Elem().(*types.Basic); !ok || basic.Kind() != types.Byte {
- continue
- }
- if basic, ok := sig.Results().At(0).Type().(*types.Basic); !ok || basic.Kind() != types.Int {
- continue
- }
- if named, ok := sig.Results().At(1).Type().(*types.Named); !ok || !code.IsType(named, "error") {
- continue
- }
-
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- switch ins := ins.(type) {
- case *ir.Store:
- addr, ok := ins.Addr.(*ir.IndexAddr)
- if !ok {
- continue
- }
- if addr.X != fn.Params[1] {
- continue
- }
- report.Report(pass, ins, "io.Writer.Write must not modify the provided buffer, not even temporarily")
- case *ir.Call:
- if !code.IsCallTo(ins.Common(), "append") {
- continue
- }
- if ins.Common().Args[0] != fn.Params[1] {
- continue
- }
- report.Report(pass, ins, "io.Writer.Write must not modify the provided buffer, not even temporarily")
- }
- }
- }
- }
- return nil, nil
-}
-
-func loopedRegexp(name string) CallCheck {
- return func(call *Call) {
- if len(extractConsts(call.Args[0].Value.Value)) == 0 {
- return
- }
- if !isInLoop(call.Instr.Block()) {
- return
- }
- call.Invalid(fmt.Sprintf("calling %s in a loop has poor performance, consider using regexp.Compile", name))
- }
-}
-
-func CheckEmptyBranch(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- if fn.Source() == nil {
- continue
- }
- if code.IsExample(fn) {
- continue
- }
- cb := func(node ast.Node) bool {
- ifstmt, ok := node.(*ast.IfStmt)
- if !ok {
- return true
- }
- if ifstmt.Else != nil {
- b, ok := ifstmt.Else.(*ast.BlockStmt)
- if !ok || len(b.List) != 0 {
- return true
- }
- report.Report(pass, ifstmt.Else, "empty branch", report.FilterGenerated(), report.ShortRange())
- }
- if len(ifstmt.Body.List) != 0 {
- return true
- }
- report.Report(pass, ifstmt, "empty branch", report.FilterGenerated(), report.ShortRange())
- return true
- }
- Inspect(fn.Source(), cb)
- }
- return nil, nil
-}
-
-func CheckMapBytesKey(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, b := range fn.Blocks {
- insLoop:
- for _, ins := range b.Instrs {
- // find []byte -> string conversions
- conv, ok := ins.(*ir.Convert)
- if !ok || conv.Type() != types.Universe.Lookup("string").Type() {
- continue
- }
- if s, ok := conv.X.Type().(*types.Slice); !ok || s.Elem() != types.Universe.Lookup("byte").Type() {
- continue
- }
- refs := conv.Referrers()
- // need at least two (DebugRef) references: the
- // conversion and the *ast.Ident
- if refs == nil || len(*refs) < 2 {
- continue
- }
- ident := false
- // skip first reference, that's the conversion itself
- for _, ref := range (*refs)[1:] {
- switch ref := ref.(type) {
- case *ir.DebugRef:
- if _, ok := ref.Expr.(*ast.Ident); !ok {
- // the string seems to be used somewhere
- // unexpected; the default branch should
- // catch this already, but be safe
- continue insLoop
- } else {
- ident = true
- }
- case *ir.MapLookup:
- default:
- // the string is used somewhere else than a
- // map lookup
- continue insLoop
- }
- }
-
- // the result of the conversion wasn't assigned to an
- // identifier
- if !ident {
- continue
- }
- report.Report(pass, conv, "m[string(key)] would be more efficient than k := string(key); m[k]")
- }
- }
- }
- return nil, nil
-}
-
-func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) {
- return sharedcheck.CheckRangeStringRunes(pass)
-}
-
-func CheckSelfAssignment(pass *analysis.Pass) (interface{}, error) {
- pure := pass.ResultOf[facts.Purity].(facts.PurityResult)
-
- fn := func(node ast.Node) {
- assign := node.(*ast.AssignStmt)
- if assign.Tok != token.ASSIGN || len(assign.Lhs) != len(assign.Rhs) {
- return
- }
- for i, lhs := range assign.Lhs {
- rhs := assign.Rhs[i]
- if reflect.TypeOf(lhs) != reflect.TypeOf(rhs) {
- continue
- }
- if code.MayHaveSideEffects(pass, lhs, pure) || code.MayHaveSideEffects(pass, rhs, pure) {
- continue
- }
-
- rlh := report.Render(pass, lhs)
- rrh := report.Render(pass, rhs)
- if rlh == rrh {
- report.Report(pass, assign, fmt.Sprintf("self-assignment of %s to %s", rrh, rlh), report.FilterGenerated())
- }
- }
- }
- code.Preorder(pass, fn, (*ast.AssignStmt)(nil))
- return nil, nil
-}
-
-func buildTagsIdentical(s1, s2 []string) bool {
- if len(s1) != len(s2) {
- return false
- }
- s1s := make([]string, len(s1))
- copy(s1s, s1)
- sort.Strings(s1s)
- s2s := make([]string, len(s2))
- copy(s2s, s2)
- sort.Strings(s2s)
- for i, s := range s1s {
- if s != s2s[i] {
- return false
- }
- }
- return true
-}
-
-func CheckDuplicateBuildConstraints(pass *analysis.Pass) (interface{}, error) {
- for _, f := range pass.Files {
- constraints := buildTags(f)
- for i, constraint1 := range constraints {
- for j, constraint2 := range constraints {
- if i >= j {
- continue
- }
- if buildTagsIdentical(constraint1, constraint2) {
- msg := fmt.Sprintf("identical build constraints %q and %q",
- strings.Join(constraint1, " "),
- strings.Join(constraint2, " "))
- report.Report(pass, f, msg, report.FilterGenerated(), report.ShortRange())
- }
- }
- }
- }
- return nil, nil
-}
-
-func CheckSillyRegexp(pass *analysis.Pass) (interface{}, error) {
- // We could use the rule checking engine for this, but the
- // arguments aren't really invalid.
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, b := range fn.Blocks {
- for _, ins := range b.Instrs {
- call, ok := ins.(*ir.Call)
- if !ok {
- continue
- }
- if !code.IsCallToAny(call.Common(), "regexp.MustCompile", "regexp.Compile", "regexp.Match", "regexp.MatchReader", "regexp.MatchString") {
- continue
- }
- c, ok := call.Common().Args[0].(*ir.Const)
- if !ok {
- continue
- }
- s := constant.StringVal(c.Value)
- re, err := syntax.Parse(s, 0)
- if err != nil {
- continue
- }
- if re.Op != syntax.OpLiteral && re.Op != syntax.OpEmptyMatch {
- continue
- }
- report.Report(pass, call, "regular expression does not contain any meta characters")
- }
- }
- }
- return nil, nil
-}
-
-func CheckMissingEnumTypesInDeclaration(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- decl := node.(*ast.GenDecl)
- if !decl.Lparen.IsValid() {
- return
- }
- if decl.Tok != token.CONST {
- return
- }
-
- groups := code.GroupSpecs(pass.Fset, decl.Specs)
- groupLoop:
- for _, group := range groups {
- if len(group) < 2 {
- continue
- }
- if group[0].(*ast.ValueSpec).Type == nil {
- // first constant doesn't have a type
- continue groupLoop
- }
- for i, spec := range group {
- spec := spec.(*ast.ValueSpec)
- if len(spec.Names) != 1 || len(spec.Values) != 1 {
- continue groupLoop
- }
- switch v := spec.Values[0].(type) {
- case *ast.BasicLit:
- case *ast.UnaryExpr:
- if _, ok := v.X.(*ast.BasicLit); !ok {
- continue groupLoop
- }
- default:
- // if it's not a literal it might be typed, such as
- // time.Microsecond = 1000 * Nanosecond
- continue groupLoop
- }
- if i == 0 {
- continue
- }
- if spec.Type != nil {
- continue groupLoop
- }
- }
- var edits []analysis.TextEdit
- typ := group[0].(*ast.ValueSpec).Type
- for _, spec := range group[1:] {
- nspec := *spec.(*ast.ValueSpec)
- nspec.Type = typ
- edits = append(edits, edit.ReplaceWithNode(pass.Fset, spec, &nspec))
- }
- report.Report(pass, group[0], "only the first constant in this group has an explicit type", report.Fixes(edit.Fix("add type to all constants in group", edits...)))
- }
- }
- code.Preorder(pass, fn, (*ast.GenDecl)(nil))
- return nil, nil
-}
-
-func CheckTimerResetReturnValue(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, block := range fn.Blocks {
- for _, ins := range block.Instrs {
- call, ok := ins.(*ir.Call)
- if !ok {
- continue
- }
- if !code.IsCallTo(call.Common(), "(*time.Timer).Reset") {
- continue
- }
- refs := call.Referrers()
- if refs == nil {
- continue
- }
- for _, ref := range code.FilterDebug(*refs) {
- ifstmt, ok := ref.(*ir.If)
- if !ok {
- continue
- }
-
- found := false
- for _, succ := range ifstmt.Block().Succs {
- if len(succ.Preds) != 1 {
- // Merge point, not a branch in the
- // syntactical sense.
-
- // FIXME(dh): this is broken for if
- // statements a la "if x || y"
- continue
- }
- irutil.Walk(succ, func(b *ir.BasicBlock) bool {
- if !succ.Dominates(b) {
- // We've reached the end of the branch
- return false
- }
- for _, ins := range b.Instrs {
- // TODO(dh): we should check that
- // we're receiving from the channel of
- // a time.Timer to further reduce
- // false positives. Not a key
- // priority, considering the rarity of
- // Reset and the tiny likeliness of a
- // false positive
- if ins, ok := ins.(*ir.Recv); ok && code.IsType(ins.Chan.Type(), "<-chan time.Time") {
- found = true
- return false
- }
- }
- return true
- })
- }
-
- if found {
- report.Report(pass, call, "it is not possible to use Reset's return value correctly, as there is a race condition between draining the channel and the new timer expiring")
- }
- }
- }
- }
- }
- return nil, nil
-}
-
-var (
- checkToLowerToUpperComparisonQ = pattern.MustParse(`
- (BinaryExpr
- (CallExpr fun@(Function (Or "strings.ToLower" "strings.ToUpper")) [a])
- tok@(Or "==" "!=")
- (CallExpr fun [b]))`)
- checkToLowerToUpperComparisonR = pattern.MustParse(`(CallExpr (SelectorExpr (Ident "strings") (Ident "EqualFold")) [a b])`)
-)
-
-func CheckToLowerToUpperComparison(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- m, ok := Match(pass, checkToLowerToUpperComparisonQ, node)
- if !ok {
- return
- }
- rn := pattern.NodeToAST(checkToLowerToUpperComparisonR.Root, m.State).(ast.Expr)
- if m.State["tok"].(token.Token) == token.NEQ {
- rn = &ast.UnaryExpr{
- Op: token.NOT,
- X: rn,
- }
- }
-
- report.Report(pass, node, "should use strings.EqualFold instead", report.Fixes(edit.Fix("replace with strings.EqualFold", edit.ReplaceWithNode(pass.Fset, node, rn))))
- }
-
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
-
-func CheckUnreachableTypeCases(pass *analysis.Pass) (interface{}, error) {
- // Check if T subsumes V in a type switch. T subsumes V if T is an interface and T's method set is a subset of V's method set.
- subsumes := func(T, V types.Type) bool {
- tIface, ok := T.Underlying().(*types.Interface)
- if !ok {
- return false
- }
-
- return types.Implements(V, tIface)
- }
-
- subsumesAny := func(Ts, Vs []types.Type) (types.Type, types.Type, bool) {
- for _, T := range Ts {
- for _, V := range Vs {
- if subsumes(T, V) {
- return T, V, true
- }
- }
- }
-
- return nil, nil, false
- }
-
- fn := func(node ast.Node) {
- tsStmt := node.(*ast.TypeSwitchStmt)
-
- type ccAndTypes struct {
- cc *ast.CaseClause
- types []types.Type
- }
-
- // All asserted types in the order of case clauses.
- ccs := make([]ccAndTypes, 0, len(tsStmt.Body.List))
- for _, stmt := range tsStmt.Body.List {
- cc, _ := stmt.(*ast.CaseClause)
-
- // Exclude the 'default' case.
- if len(cc.List) == 0 {
- continue
- }
-
- Ts := make([]types.Type, len(cc.List))
- for i, expr := range cc.List {
- Ts[i] = pass.TypesInfo.TypeOf(expr)
- }
-
- ccs = append(ccs, ccAndTypes{cc: cc, types: Ts})
- }
-
- if len(ccs) <= 1 {
- // Zero or one case clauses, nothing to check.
- return
- }
-
- // Check if case clauses following cc have types that are subsumed by cc.
- for i, cc := range ccs[:len(ccs)-1] {
- for _, next := range ccs[i+1:] {
- if T, V, yes := subsumesAny(cc.types, next.types); yes {
- report.Report(pass, next.cc, fmt.Sprintf("unreachable case clause: %s will always match before %s", T.String(), V.String()),
- report.ShortRange())
- }
- }
- }
- }
-
- code.Preorder(pass, fn, (*ast.TypeSwitchStmt)(nil))
- return nil, nil
-}
-
-var checkSingleArgAppendQ = pattern.MustParse(`(CallExpr (Builtin "append") [_])`)
-
-func CheckSingleArgAppend(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- _, ok := Match(pass, checkSingleArgAppendQ, node)
- if !ok {
- return
- }
- report.Report(pass, node, "x = append(y) is equivalent to x = y", report.FilterGenerated())
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-func CheckStructTags(pass *analysis.Pass) (interface{}, error) {
- importsGoFlags := false
-
- // we use the AST instead of (*types.Package).Imports to work
- // around vendored packages in GOPATH mode. A vendored package's
- // path will include the vendoring subtree as a prefix.
- for _, f := range pass.Files {
- for _, imp := range f.Imports {
- v := imp.Path.Value
- if v[1:len(v)-1] == "github.com/jessevdk/go-flags" {
- importsGoFlags = true
- break
- }
- }
- }
-
- fn := func(node ast.Node) {
- for _, field := range node.(*ast.StructType).Fields.List {
- if field.Tag == nil {
- continue
- }
- tags, err := parseStructTag(field.Tag.Value[1 : len(field.Tag.Value)-1])
- if err != nil {
- report.Report(pass, field.Tag, fmt.Sprintf("unparseable struct tag: %s", err))
- continue
- }
- for k, v := range tags {
- if len(v) > 1 {
- isGoFlagsTag := importsGoFlags &&
- (k == "choice" || k == "optional-value" || k == "default")
- if !isGoFlagsTag {
- report.Report(pass, field.Tag, fmt.Sprintf("duplicate struct tag %q", k))
- }
- }
-
- switch k {
- case "json":
- checkJSONTag(pass, field, v[0])
- case "xml":
- checkXMLTag(pass, field, v[0])
- }
- }
- }
- }
- code.Preorder(pass, fn, (*ast.StructType)(nil))
- return nil, nil
-}
-
-func checkJSONTag(pass *analysis.Pass, field *ast.Field, tag string) {
- if pass.Pkg.Path() == "encoding/json" || pass.Pkg.Path() == "encoding/json_test" {
- // don't flag malformed JSON tags in the encoding/json
- // package; it knows what it is doing, and it is testing
- // itself.
- return
- }
- //lint:ignore SA9003 TODO(dh): should we flag empty tags?
- if len(tag) == 0 {
- }
- fields := strings.Split(tag, ",")
- for _, r := range fields[0] {
- if !unicode.IsLetter(r) && !unicode.IsDigit(r) && !strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", r) {
- report.Report(pass, field.Tag, fmt.Sprintf("invalid JSON field name %q", fields[0]))
- }
- }
- var co, cs, ci int
- for _, s := range fields[1:] {
- switch s {
- case "omitempty":
- co++
- case "":
- // allow stuff like "-,"
- case "string":
- cs++
- // only for string, floating point, integer and bool
- T := code.Dereference(pass.TypesInfo.TypeOf(field.Type).Underlying()).Underlying()
- basic, ok := T.(*types.Basic)
- if !ok || (basic.Info()&(types.IsBoolean|types.IsInteger|types.IsFloat|types.IsString)) == 0 {
- report.Report(pass, field.Tag, "the JSON string option only applies to fields of type string, floating point, integer or bool, or pointers to those")
- }
- case "inline":
- ci++
- default:
- report.Report(pass, field.Tag, fmt.Sprintf("unknown JSON option %q", s))
- }
- }
- if co > 1 {
- report.Report(pass, field.Tag, `duplicate JSON option "omitempty"`)
- }
- if cs > 1 {
- report.Report(pass, field.Tag, `duplicate JSON option "string"`)
- }
- if ci > 1 {
- report.Report(pass, field.Tag, `duplicate JSON option "inline"`)
- }
-}
-
-func checkXMLTag(pass *analysis.Pass, field *ast.Field, tag string) {
- //lint:ignore SA9003 TODO(dh): should we flag empty tags?
- if len(tag) == 0 {
- }
- fields := strings.Split(tag, ",")
- counts := map[string]int{}
- var exclusives []string
- for _, s := range fields[1:] {
- switch s {
- case "attr", "chardata", "cdata", "innerxml", "comment":
- counts[s]++
- if counts[s] == 1 {
- exclusives = append(exclusives, s)
- }
- case "omitempty", "any":
- counts[s]++
- case "":
- default:
- report.Report(pass, field.Tag, fmt.Sprintf("unknown XML option %q", s))
- }
- }
- for k, v := range counts {
- if v > 1 {
- report.Report(pass, field.Tag, fmt.Sprintf("duplicate XML option %q", k))
- }
- }
- if len(exclusives) > 1 {
- report.Report(pass, field.Tag, fmt.Sprintf("XML options %s are mutually exclusive", strings.Join(exclusives, " and ")))
- }
-}
-
-func CheckImpossibleTypeAssertion(pass *analysis.Pass) (interface{}, error) {
- type entry struct {
- l, r *types.Func
- }
-
- msc := &pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg.Prog.MethodSets
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- for _, b := range fn.Blocks {
- for _, instr := range b.Instrs {
- assert, ok := instr.(*ir.TypeAssert)
- if !ok {
- continue
- }
- var wrong []entry
- left := assert.X.Type()
- right := assert.AssertedType
- righti, ok := right.Underlying().(*types.Interface)
-
- if !ok {
- // We only care about interface->interface
- // assertions. The Go compiler already catches
- // impossible interface->concrete assertions.
- continue
- }
-
- ms := msc.MethodSet(left)
- for i := 0; i < righti.NumMethods(); i++ {
- mr := righti.Method(i)
- sel := ms.Lookup(mr.Pkg(), mr.Name())
- if sel == nil {
- continue
- }
- ml := sel.Obj().(*types.Func)
- if types.AssignableTo(ml.Type(), mr.Type()) {
- continue
- }
-
- wrong = append(wrong, entry{ml, mr})
- }
-
- if len(wrong) != 0 {
- s := fmt.Sprintf("impossible type assertion; %s and %s contradict each other:",
- types.TypeString(left, types.RelativeTo(pass.Pkg)),
- types.TypeString(right, types.RelativeTo(pass.Pkg)))
- for _, e := range wrong {
- s += fmt.Sprintf("\n\twrong type for %s method", e.l.Name())
- s += fmt.Sprintf("\n\t\thave %s", e.l.Type())
- s += fmt.Sprintf("\n\t\twant %s", e.r.Type())
- }
- report.Report(pass, assert, s)
- }
- }
- }
- }
- return nil, nil
-}
-
-func checkWithValueKey(call *Call) {
- arg := call.Args[1]
- T := arg.Value.Value.Type()
- if T, ok := T.(*types.Basic); ok {
- arg.Invalid(
- fmt.Sprintf("should not use built-in type %s as key for value; define your own type to avoid collisions", T))
- }
- if !types.Comparable(T) {
- arg.Invalid(fmt.Sprintf("keys used with context.WithValue must be comparable, but type %s is not comparable", T))
- }
-}
-
-func CheckMaybeNil(pass *analysis.Pass) (interface{}, error) {
- // This is an extremely trivial check that doesn't try to reason
- // about control flow. That is, phis and sigmas do not propagate
- // any information. As such, we can flag this:
- //
- // _ = *x
- // if x == nil { return }
- //
- // but we cannot flag this:
- //
- // if x == nil { println(x) }
- // _ = *x
- //
- // nor many other variations of conditional uses of or assignments to x.
- //
- // However, even this trivial implementation finds plenty of
- // real-world bugs, such as dereference before nil pointer check,
- // or using t.Error instead of t.Fatal when encountering nil
- // pointers.
- //
- // On the flip side, our naive implementation avoids false positives in branches, such as
- //
- // if x != nil { _ = *x }
- //
- // due to the same lack of propagating information through sigma
- // nodes. x inside the branch will be independent of the x in the
- // nil pointer check.
- //
- //
- // We could implement a more powerful check, but then we'd be
- // getting false positives instead of false negatives because
- // we're incapable of deducing relationships between variables.
- // For example, a function might return a pointer and an error,
- // and the error being nil guarantees that the pointer is not nil.
- // Depending on the surrounding code, the pointer may still end up
- // being checked against nil in one place, and guarded by a check
- // on the error in another, which would lead to us marking some
- // loads as unsafe.
- //
- // Unfortunately, simply hard-coding the relationship between
- // return values wouldn't eliminate all false positives, either.
- // Many other more subtle relationships exist. An abridged example
- // from real code:
- //
- // if a == nil && b == nil { return }
- // c := fn(a)
- // if c != "" { _ = *a }
- //
- // where `fn` is guaranteed to return a non-empty string if a
- // isn't nil.
- //
- // We choose to err on the side of false negatives.
-
- isNilConst := func(v ir.Value) bool {
- if code.IsPointerLike(v.Type()) {
- if k, ok := v.(*ir.Const); ok {
- return k.IsNil()
- }
- }
- return false
- }
-
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- maybeNil := map[ir.Value]ir.Instruction{}
- for _, b := range fn.Blocks {
- for _, instr := range b.Instrs {
- if instr, ok := instr.(*ir.BinOp); ok {
- var ptr ir.Value
- if isNilConst(instr.X) {
- ptr = instr.Y
- } else if isNilConst(instr.Y) {
- ptr = instr.X
- }
- maybeNil[ptr] = instr
- }
- }
- }
-
- for _, b := range fn.Blocks {
- for _, instr := range b.Instrs {
- var ptr ir.Value
- switch instr := instr.(type) {
- case *ir.Load:
- ptr = instr.X
- case *ir.Store:
- ptr = instr.Addr
- case *ir.IndexAddr:
- ptr = instr.X
- case *ir.FieldAddr:
- ptr = instr.X
- }
- if ptr != nil {
- switch ptr.(type) {
- case *ir.Alloc, *ir.FieldAddr, *ir.IndexAddr:
- // these cannot be nil
- continue
- }
- if r, ok := maybeNil[ptr]; ok {
- report.Report(pass, instr, "possible nil pointer dereference",
- report.Related(r, "this check suggests that the pointer can be nil"))
- }
- }
- }
- }
- }
-
- return nil, nil
-}
-
-var checkAddressIsNilQ = pattern.MustParse(
- `(BinaryExpr
- (UnaryExpr "&" _)
- (Or "==" "!=")
- (Builtin "nil"))`)
-
-func CheckAddressIsNil(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- _, ok := Match(pass, checkAddressIsNilQ, node)
- if !ok {
- return
- }
- report.Report(pass, node, "the address of a variable cannot be nil")
- }
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
diff --git a/vendor/honnef.co/go/tools/staticcheck/rules.go b/vendor/honnef.co/go/tools/staticcheck/rules.go
deleted file mode 100644
index 57f7282dede..00000000000
--- a/vendor/honnef.co/go/tools/staticcheck/rules.go
+++ /dev/null
@@ -1,315 +0,0 @@
-package staticcheck
-
-import (
- "fmt"
- "go/constant"
- "go/types"
- "net"
- "net/url"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "time"
- "unicode/utf8"
-
- "golang.org/x/tools/go/analysis"
- "honnef.co/go/tools/code"
- "honnef.co/go/tools/ir"
-)
-
-const (
- MsgInvalidHostPort = "invalid port or service name in host:port pair"
- MsgInvalidUTF8 = "argument is not a valid UTF-8 encoded string"
- MsgNonUniqueCutset = "cutset contains duplicate characters"
-)
-
-type Call struct {
- Pass *analysis.Pass
- Instr ir.CallInstruction
- Args []*Argument
-
- Parent *ir.Function
-
- invalids []string
-}
-
-func (c *Call) Invalid(msg string) {
- c.invalids = append(c.invalids, msg)
-}
-
-type Argument struct {
- Value Value
- invalids []string
-}
-
-type Value struct {
- Value ir.Value
-}
-
-func (arg *Argument) Invalid(msg string) {
- arg.invalids = append(arg.invalids, msg)
-}
-
-type CallCheck func(call *Call)
-
-func extractConsts(v ir.Value) []*ir.Const {
- switch v := v.(type) {
- case *ir.Const:
- return []*ir.Const{v}
- case *ir.MakeInterface:
- return extractConsts(v.X)
- default:
- return nil
- }
-}
-
-func ValidateRegexp(v Value) error {
- for _, c := range extractConsts(v.Value) {
- if c.Value == nil {
- continue
- }
- if c.Value.Kind() != constant.String {
- continue
- }
- s := constant.StringVal(c.Value)
- if _, err := regexp.Compile(s); err != nil {
- return err
- }
- }
- return nil
-}
-
-func ValidateTimeLayout(v Value) error {
- for _, c := range extractConsts(v.Value) {
- if c.Value == nil {
- continue
- }
- if c.Value.Kind() != constant.String {
- continue
- }
- s := constant.StringVal(c.Value)
- s = strings.Replace(s, "_", " ", -1)
- s = strings.Replace(s, "Z", "-", -1)
- _, err := time.Parse(s, s)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func ValidateURL(v Value) error {
- for _, c := range extractConsts(v.Value) {
- if c.Value == nil {
- continue
- }
- if c.Value.Kind() != constant.String {
- continue
- }
- s := constant.StringVal(c.Value)
- _, err := url.Parse(s)
- if err != nil {
- return fmt.Errorf("%q is not a valid URL: %s", s, err)
- }
- }
- return nil
-}
-
-func InvalidUTF8(v Value) bool {
- for _, c := range extractConsts(v.Value) {
- if c.Value == nil {
- continue
- }
- if c.Value.Kind() != constant.String {
- continue
- }
- s := constant.StringVal(c.Value)
- if !utf8.ValidString(s) {
- return true
- }
- }
- return false
-}
-
-func UnbufferedChannel(v Value) bool {
- // TODO(dh): this check of course misses many cases of unbuffered
- // channels, such as any in phi or sigma nodes. We'll eventually
- // replace this function.
- val := v.Value
- if ct, ok := val.(*ir.ChangeType); ok {
- val = ct.X
- }
- mk, ok := val.(*ir.MakeChan)
- if !ok {
- return false
- }
- if k, ok := mk.Size.(*ir.Const); ok && k.Value.Kind() == constant.Int {
- if v, ok := constant.Int64Val(k.Value); ok && v == 0 {
- return true
- }
- }
- return false
-}
-
-func Pointer(v Value) bool {
- switch v.Value.Type().Underlying().(type) {
- case *types.Pointer, *types.Interface:
- return true
- }
- return false
-}
-
-func ConvertedFromInt(v Value) bool {
- conv, ok := v.Value.(*ir.Convert)
- if !ok {
- return false
- }
- b, ok := conv.X.Type().Underlying().(*types.Basic)
- if !ok {
- return false
- }
- if (b.Info() & types.IsInteger) == 0 {
- return false
- }
- return true
-}
-
-func validEncodingBinaryType(pass *analysis.Pass, typ types.Type) bool {
- typ = typ.Underlying()
- switch typ := typ.(type) {
- case *types.Basic:
- switch typ.Kind() {
- case types.Uint8, types.Uint16, types.Uint32, types.Uint64,
- types.Int8, types.Int16, types.Int32, types.Int64,
- types.Float32, types.Float64, types.Complex64, types.Complex128, types.Invalid:
- return true
- case types.Bool:
- return code.IsGoVersion(pass, 8)
- }
- return false
- case *types.Struct:
- n := typ.NumFields()
- for i := 0; i < n; i++ {
- if !validEncodingBinaryType(pass, typ.Field(i).Type()) {
- return false
- }
- }
- return true
- case *types.Array:
- return validEncodingBinaryType(pass, typ.Elem())
- case *types.Interface:
- // we can't determine if it's a valid type or not
- return true
- }
- return false
-}
-
-func CanBinaryMarshal(pass *analysis.Pass, v Value) bool {
- typ := v.Value.Type().Underlying()
- if ttyp, ok := typ.(*types.Pointer); ok {
- typ = ttyp.Elem().Underlying()
- }
- if ttyp, ok := typ.(interface {
- Elem() types.Type
- }); ok {
- if _, ok := ttyp.(*types.Pointer); !ok {
- typ = ttyp.Elem()
- }
- }
-
- return validEncodingBinaryType(pass, typ)
-}
-
-func RepeatZeroTimes(name string, arg int) CallCheck {
- return func(call *Call) {
- arg := call.Args[arg]
- if k, ok := arg.Value.Value.(*ir.Const); ok && k.Value.Kind() == constant.Int {
- if v, ok := constant.Int64Val(k.Value); ok && v == 0 {
- arg.Invalid(fmt.Sprintf("calling %s with n == 0 will return no results, did you mean -1?", name))
- }
- }
- }
-}
-
-func validateServiceName(s string) bool {
- if len(s) < 1 || len(s) > 15 {
- return false
- }
- if s[0] == '-' || s[len(s)-1] == '-' {
- return false
- }
- if strings.Contains(s, "--") {
- return false
- }
- hasLetter := false
- for _, r := range s {
- if (r >= 'A' && r <= 'Z') || (r >= 'a' && r <= 'z') {
- hasLetter = true
- continue
- }
- if r >= '0' && r <= '9' {
- continue
- }
- return false
- }
- return hasLetter
-}
-
-func validatePort(s string) bool {
- n, err := strconv.ParseInt(s, 10, 64)
- if err != nil {
- return validateServiceName(s)
- }
- return n >= 0 && n <= 65535
-}
-
-func ValidHostPort(v Value) bool {
- for _, k := range extractConsts(v.Value) {
- if k.Value == nil {
- continue
- }
- if k.Value.Kind() != constant.String {
- continue
- }
- s := constant.StringVal(k.Value)
- _, port, err := net.SplitHostPort(s)
- if err != nil {
- return false
- }
- // TODO(dh): check hostname
- if !validatePort(port) {
- return false
- }
- }
- return true
-}
-
-// ConvertedFrom reports whether value v was converted from type typ.
-func ConvertedFrom(v Value, typ string) bool {
- change, ok := v.Value.(*ir.ChangeType)
- return ok && code.IsType(change.X.Type(), typ)
-}
-
-func UniqueStringCutset(v Value) bool {
- for _, c := range extractConsts(v.Value) {
- if c.Value == nil {
- continue
- }
- if c.Value.Kind() != constant.String {
- continue
- }
- s := constant.StringVal(c.Value)
- rs := runeSlice(s)
- if len(rs) < 2 {
- continue
- }
- sort.Sort(rs)
- for i, r := range rs[1:] {
- if rs[i] == r {
- return false
- }
- }
- }
- return true
-}
diff --git a/vendor/honnef.co/go/tools/staticcheck/structtag.go b/vendor/honnef.co/go/tools/staticcheck/structtag.go
deleted file mode 100644
index 38830a22c63..00000000000
--- a/vendor/honnef.co/go/tools/staticcheck/structtag.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Copyright 2019 Dominik Honnef. All rights reserved.
-
-package staticcheck
-
-import "strconv"
-
-func parseStructTag(tag string) (map[string][]string, error) {
- // FIXME(dh): detect missing closing quote
- out := map[string][]string{}
-
- for tag != "" {
- // Skip leading space.
- i := 0
- for i < len(tag) && tag[i] == ' ' {
- i++
- }
- tag = tag[i:]
- if tag == "" {
- break
- }
-
- // Scan to colon. A space, a quote or a control character is a syntax error.
- // Strictly speaking, control chars include the range [0x7f, 0x9f], not just
- // [0x00, 0x1f], but in practice, we ignore the multi-byte control characters
- // as it is simpler to inspect the tag's bytes than the tag's runes.
- i = 0
- for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
- i++
- }
- if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
- break
- }
- name := string(tag[:i])
- tag = tag[i+1:]
-
- // Scan quoted string to find value.
- i = 1
- for i < len(tag) && tag[i] != '"' {
- if tag[i] == '\\' {
- i++
- }
- i++
- }
- if i >= len(tag) {
- break
- }
- qvalue := string(tag[:i+1])
- tag = tag[i+1:]
-
- value, err := strconv.Unquote(qvalue)
- if err != nil {
- return nil, err
- }
- out[name] = append(out[name], value)
- }
- return out, nil
-}
diff --git a/vendor/honnef.co/go/tools/stylecheck/analysis.go b/vendor/honnef.co/go/tools/stylecheck/analysis.go
deleted file mode 100644
index 0f93f44365d..00000000000
--- a/vendor/honnef.co/go/tools/stylecheck/analysis.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package stylecheck
-
-import (
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "honnef.co/go/tools/config"
- "honnef.co/go/tools/facts"
- "honnef.co/go/tools/internal/passes/buildir"
- "honnef.co/go/tools/lint/lintutil"
-)
-
-var Analyzers = lintutil.InitializeAnalyzers(Docs, map[string]*analysis.Analyzer{
- "ST1000": {
- Run: CheckPackageComment,
- },
- "ST1001": {
- Run: CheckDotImports,
- Requires: []*analysis.Analyzer{facts.Generated, config.Analyzer},
- },
- "ST1003": {
- Run: CheckNames,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, config.Analyzer},
- },
- "ST1005": {
- Run: CheckErrorStrings,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "ST1006": {
- Run: CheckReceiverNames,
- Requires: []*analysis.Analyzer{buildir.Analyzer, facts.Generated},
- },
- "ST1008": {
- Run: CheckErrorReturn,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- },
- "ST1011": {
- Run: CheckTimeNames,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "ST1012": {
- Run: CheckErrorVarNames,
- Requires: []*analysis.Analyzer{config.Analyzer},
- },
- "ST1013": {
- Run: CheckHTTPStatusCodes,
- // TODO(dh): why does this depend on facts.TokenFile?
- Requires: []*analysis.Analyzer{facts.Generated, facts.TokenFile, config.Analyzer, inspect.Analyzer},
- },
- "ST1015": {
- Run: CheckDefaultCaseOrder,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile},
- },
- "ST1016": {
- Run: CheckReceiverNamesIdentical,
- Requires: []*analysis.Analyzer{buildir.Analyzer, facts.Generated},
- },
- "ST1017": {
- Run: CheckYodaConditions,
- Requires: []*analysis.Analyzer{inspect.Analyzer, facts.Generated, facts.TokenFile},
- },
- "ST1018": {
- Run: CheckInvisibleCharacters,
- Requires: []*analysis.Analyzer{inspect.Analyzer},
- },
- "ST1019": {
- Run: CheckDuplicatedImports,
- Requires: []*analysis.Analyzer{facts.Generated, config.Analyzer},
- },
- "ST1020": {
- Run: CheckExportedFunctionDocs,
- Requires: []*analysis.Analyzer{facts.Generated, inspect.Analyzer},
- },
- "ST1021": {
- Run: CheckExportedTypeDocs,
- Requires: []*analysis.Analyzer{facts.Generated, inspect.Analyzer},
- },
- "ST1022": {
- Run: CheckExportedVarDocs,
- Requires: []*analysis.Analyzer{facts.Generated, inspect.Analyzer},
- },
-})
diff --git a/vendor/honnef.co/go/tools/stylecheck/doc.go b/vendor/honnef.co/go/tools/stylecheck/doc.go
deleted file mode 100644
index b8e7f3f9e4a..00000000000
--- a/vendor/honnef.co/go/tools/stylecheck/doc.go
+++ /dev/null
@@ -1,231 +0,0 @@
-package stylecheck
-
-import "honnef.co/go/tools/lint"
-
-var Docs = map[string]*lint.Documentation{
- "ST1000": {
- Title: `Incorrect or missing package comment`,
- Text: `Packages must have a package comment that is formatted according to
-the guidelines laid out in
-https://github.com/golang/go/wiki/CodeReviewComments#package-comments.`,
- Since: "2019.1",
- NonDefault: true,
- },
-
- "ST1001": {
- Title: `Dot imports are discouraged`,
- Text: `Dot imports that aren't in external test packages are discouraged.
-
-The dot_import_whitelist option can be used to whitelist certain
-imports.
-
-Quoting Go Code Review Comments:
-
- The import . form can be useful in tests that, due to circular
- dependencies, cannot be made part of the package being tested:
-
- package foo_test
-
- import (
- "bar/testutil" // also imports "foo"
- . "foo"
- )
-
- In this case, the test file cannot be in package foo because it
- uses bar/testutil, which imports foo. So we use the 'import .'
- form to let the file pretend to be part of package foo even though
- it is not. Except for this one case, do not use import . in your
- programs. It makes the programs much harder to read because it is
- unclear whether a name like Quux is a top-level identifier in the
- current package or in an imported package.`,
- Since: "2019.1",
- Options: []string{"dot_import_whitelist"},
- },
-
- "ST1003": {
- Title: `Poorly chosen identifier`,
- Text: `Identifiers, such as variable and package names, follow certain rules.
-
-See the following links for details:
-
-- https://golang.org/doc/effective_go.html#package-names
-- https://golang.org/doc/effective_go.html#mixed-caps
-- https://github.com/golang/go/wiki/CodeReviewComments#initialisms
-- https://github.com/golang/go/wiki/CodeReviewComments#variable-names`,
- Since: "2019.1",
- NonDefault: true,
- Options: []string{"initialisms"},
- },
-
- "ST1005": {
- Title: `Incorrectly formatted error string`,
- Text: `Error strings follow a set of guidelines to ensure uniformity and good
-composability.
-
-Quoting Go Code Review Comments:
-
- Error strings should not be capitalized (unless beginning with
- proper nouns or acronyms) or end with punctuation, since they are
- usually printed following other context. That is, use
- fmt.Errorf("something bad") not fmt.Errorf("Something bad"), so
- that log.Printf("Reading %s: %v", filename, err) formats without a
- spurious capital letter mid-message.`,
- Since: "2019.1",
- },
-
- "ST1006": {
- Title: `Poorly chosen receiver name`,
- Text: `Quoting Go Code Review Comments:
-
- The name of a method's receiver should be a reflection of its
- identity; often a one or two letter abbreviation of its type
- suffices (such as "c" or "cl" for "Client"). Don't use generic
- names such as "me", "this" or "self", identifiers typical of
- object-oriented languages that place more emphasis on methods as
- opposed to functions. The name need not be as descriptive as that
- of a method argument, as its role is obvious and serves no
- documentary purpose. It can be very short as it will appear on
- almost every line of every method of the type; familiarity admits
- brevity. Be consistent, too: if you call the receiver "c" in one
- method, don't call it "cl" in another.`,
- Since: "2019.1",
- },
-
- "ST1008": {
- Title: `A function's error value should be its last return value`,
- Text: `A function's error value should be its last return value.`,
- Since: `2019.1`,
- },
-
- "ST1011": {
- Title: `Poorly chosen name for variable of type time.Duration`,
- Text: `time.Duration values represent an amount of time, which is represented
-as a count of nanoseconds. An expression like 5 * time.Microsecond
-yields the value 5000. It is therefore not appropriate to suffix a
-variable of type time.Duration with any time unit, such as Msec or
-Milli.`,
- Since: `2019.1`,
- },
-
- "ST1012": {
- Title: `Poorly chosen name for error variable`,
- Text: `Error variables that are part of an API should be called errFoo or
-ErrFoo.`,
- Since: "2019.1",
- },
-
- "ST1013": {
- Title: `Should use constants for HTTP error codes, not magic numbers`,
- Text: `HTTP has a tremendous number of status codes. While some of those are
-well known (200, 400, 404, 500), most of them are not. The net/http
-package provides constants for all status codes that are part of the
-various specifications. It is recommended to use these constants
-instead of hard-coding magic numbers, to vastly improve the
-readability of your code.`,
- Since: "2019.1",
- Options: []string{"http_status_code_whitelist"},
- },
-
- "ST1015": {
- Title: `A switch's default case should be the first or last case`,
- Since: "2019.1",
- },
-
- "ST1016": {
- Title: `Use consistent method receiver names`,
- Since: "2019.1",
- NonDefault: true,
- },
-
- "ST1017": {
- Title: `Don't use Yoda conditions`,
- Text: `Yoda conditions are conditions of the kind 'if 42 == x', where the
-literal is on the left side of the comparison. These are a common
-idiom in languages in which assignment is an expression, to avoid bugs
-of the kind 'if (x = 42)'. In Go, which doesn't allow for this kind of
-bug, we prefer the more idiomatic 'if x == 42'.`,
- Since: "2019.2",
- },
-
- "ST1018": {
- Title: `Avoid zero-width and control characters in string literals`,
- Since: "2019.2",
- },
-
- "ST1019": {
- Title: `Importing the same package multiple times`,
- Text: `Go allows importing the same package multiple times, as long as
-different import aliases are being used. That is, the following
-bit of code is valid:
-
-import (
- "fmt"
- fumpt "fmt"
- format "fmt"
- _ "fmt"
-)
-
-However, this is very rarely done on purpose. Usually, it is a
-sign of code that got refactored, accidentally adding duplicate
-import statements. It is also a rarely known feature, which may
-contribute to confusion.
-
-Do note that sometimes, this feature may be used
-intentionally (see for example
-https://github.com/golang/go/commit/3409ce39bfd7584523b7a8c150a310cea92d879d)
-– if you want to allow this pattern in your code base, you're
-advised to disable this check.`,
- Since: "2020.1",
- },
-
- "ST1020": {
- Title: "The documentation of an exported function should start with the function's name",
- Text: `Doc comments work best as complete sentences, which
-allow a wide variety of automated presentations. The first sentence
-should be a one-sentence summary that starts with the name being
-declared.
-
-If every doc comment begins with the name of the item it describes,
-you can use the doc subcommand of the go tool and run the output
-through grep.
-
-See https://golang.org/doc/effective_go.html#commentary for more
-information on how to write good documentation.`,
- Since: "2020.1",
- NonDefault: true,
- },
-
- "ST1021": {
- Title: "The documentation of an exported type should start with type's name",
- Text: `Doc comments work best as complete sentences, which
-allow a wide variety of automated presentations. The first sentence
-should be a one-sentence summary that starts with the name being
-declared.
-
-If every doc comment begins with the name of the item it describes,
-you can use the doc subcommand of the go tool and run the output
-through grep.
-
-See https://golang.org/doc/effective_go.html#commentary for more
-information on how to write good documentation.`,
- Since: "2020.1",
- NonDefault: true,
- },
-
- "ST1022": {
- Title: "The documentation of an exported variable or constant should start with variable's name",
- Text: `Doc comments work best as complete sentences, which
-allow a wide variety of automated presentations. The first sentence
-should be a one-sentence summary that starts with the name being
-declared.
-
-If every doc comment begins with the name of the item it describes,
-you can use the doc subcommand of the go tool and run the output
-through grep.
-
-See https://golang.org/doc/effective_go.html#commentary for more
-information on how to write good documentation.`,
- Since: "2020.1",
- NonDefault: true,
- },
-}
diff --git a/vendor/honnef.co/go/tools/stylecheck/lint.go b/vendor/honnef.co/go/tools/stylecheck/lint.go
deleted file mode 100644
index 75a0112b236..00000000000
--- a/vendor/honnef.co/go/tools/stylecheck/lint.go
+++ /dev/null
@@ -1,914 +0,0 @@
-package stylecheck // import "honnef.co/go/tools/stylecheck"
-
-import (
- "fmt"
- "go/ast"
- "go/constant"
- "go/token"
- "go/types"
- "sort"
- "strconv"
- "strings"
- "unicode"
- "unicode/utf8"
-
- "honnef.co/go/tools/code"
- "honnef.co/go/tools/config"
- "honnef.co/go/tools/edit"
- "honnef.co/go/tools/internal/passes/buildir"
- "honnef.co/go/tools/ir"
- . "honnef.co/go/tools/lint/lintdsl"
- "honnef.co/go/tools/pattern"
- "honnef.co/go/tools/report"
-
- "golang.org/x/tools/go/analysis"
- "golang.org/x/tools/go/analysis/passes/inspect"
- "golang.org/x/tools/go/ast/inspector"
- "golang.org/x/tools/go/types/typeutil"
-)
-
-func CheckPackageComment(pass *analysis.Pass) (interface{}, error) {
- // - At least one file in a non-main package should have a package comment
- //
- // - The comment should be of the form
- // "Package x ...". This has a slight potential for false
- // positives, as multiple files can have package comments, in
- // which case they get appended. But that doesn't happen a lot in
- // the real world.
-
- if pass.Pkg.Name() == "main" {
- return nil, nil
- }
- hasDocs := false
- for _, f := range pass.Files {
- if code.IsInTest(pass, f) {
- continue
- }
- if f.Doc != nil && len(f.Doc.List) > 0 {
- hasDocs = true
- prefix := "Package " + f.Name.Name + " "
- if !strings.HasPrefix(strings.TrimSpace(f.Doc.Text()), prefix) {
- report.Report(pass, f.Doc, fmt.Sprintf(`package comment should be of the form "%s..."`, prefix))
- }
- f.Doc.Text()
- }
- }
-
- if !hasDocs {
- for _, f := range pass.Files {
- if code.IsInTest(pass, f) {
- continue
- }
- report.Report(pass, f, "at least one file in a package should have a package comment", report.ShortRange())
- }
- }
- return nil, nil
-}
-
-func CheckDotImports(pass *analysis.Pass) (interface{}, error) {
- for _, f := range pass.Files {
- imports:
- for _, imp := range f.Imports {
- path := imp.Path.Value
- path = path[1 : len(path)-1]
- for _, w := range config.For(pass).DotImportWhitelist {
- if w == path {
- continue imports
- }
- }
-
- if imp.Name != nil && imp.Name.Name == "." && !code.IsInTest(pass, f) {
- report.Report(pass, imp, "should not use dot imports", report.FilterGenerated())
- }
- }
- }
- return nil, nil
-}
-
-func CheckDuplicatedImports(pass *analysis.Pass) (interface{}, error) {
- for _, f := range pass.Files {
- // Collect all imports by their import path
- imports := make(map[string][]*ast.ImportSpec, len(f.Imports))
- for _, imp := range f.Imports {
- imports[imp.Path.Value] = append(imports[imp.Path.Value], imp)
- }
-
- for path, value := range imports {
- if path[1:len(path)-1] == "unsafe" {
- // Don't flag unsafe. Cgo generated code imports
- // unsafe using the blank identifier, and most
- // user-written cgo code also imports unsafe
- // explicitly.
- continue
- }
- // If there's more than one import per path, we flag that
- if len(value) > 1 {
- s := fmt.Sprintf("package %s is being imported more than once", path)
- opts := []report.Option{report.FilterGenerated()}
- for _, imp := range value[1:] {
- opts = append(opts, report.Related(imp, fmt.Sprintf("other import of %s", path)))
- }
- report.Report(pass, value[0], s, opts...)
- }
- }
- }
- return nil, nil
-}
-
-func CheckBlankImports(pass *analysis.Pass) (interface{}, error) {
- fset := pass.Fset
- for _, f := range pass.Files {
- if code.IsMainLike(pass) || code.IsInTest(pass, f) {
- continue
- }
-
- // Collect imports of the form `import _ "foo"`, i.e. with no
- // parentheses, as their comment will be associated with the
- // (paren-free) GenDecl, not the import spec itself.
- //
- // We don't directly process the GenDecl so that we can
- // correctly handle the following:
- //
- // import _ "foo"
- // import _ "bar"
- //
- // where only the first import should get flagged.
- skip := map[ast.Spec]bool{}
- ast.Inspect(f, func(node ast.Node) bool {
- switch node := node.(type) {
- case *ast.File:
- return true
- case *ast.GenDecl:
- if node.Tok != token.IMPORT {
- return false
- }
- if node.Lparen == token.NoPos && node.Doc != nil {
- skip[node.Specs[0]] = true
- }
- return false
- }
- return false
- })
- for i, imp := range f.Imports {
- pos := fset.Position(imp.Pos())
-
- if !code.IsBlank(imp.Name) {
- continue
- }
- // Only flag the first blank import in a group of imports,
- // or don't flag any of them, if the first one is
- // commented
- if i > 0 {
- prev := f.Imports[i-1]
- prevPos := fset.Position(prev.Pos())
- if pos.Line-1 == prevPos.Line && code.IsBlank(prev.Name) {
- continue
- }
- }
-
- if imp.Doc == nil && imp.Comment == nil && !skip[imp] {
- report.Report(pass, imp, "a blank import should be only in a main or test package, or have a comment justifying it")
- }
- }
- }
- return nil, nil
-}
-
-func CheckIncDec(pass *analysis.Pass) (interface{}, error) {
- // TODO(dh): this can be noisy for function bodies that look like this:
- // x += 3
- // ...
- // x += 2
- // ...
- // x += 1
- fn := func(node ast.Node) {
- assign := node.(*ast.AssignStmt)
- if assign.Tok != token.ADD_ASSIGN && assign.Tok != token.SUB_ASSIGN {
- return
- }
- if (len(assign.Lhs) != 1 || len(assign.Rhs) != 1) ||
- !code.IsIntLiteral(assign.Rhs[0], "1") {
- return
- }
-
- suffix := ""
- switch assign.Tok {
- case token.ADD_ASSIGN:
- suffix = "++"
- case token.SUB_ASSIGN:
- suffix = "--"
- }
-
- report.Report(pass, assign, fmt.Sprintf("should replace %s with %s%s", report.Render(pass, assign), report.Render(pass, assign.Lhs[0]), suffix))
- }
- code.Preorder(pass, fn, (*ast.AssignStmt)(nil))
- return nil, nil
-}
-
-func CheckErrorReturn(pass *analysis.Pass) (interface{}, error) {
-fnLoop:
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- sig := fn.Type().(*types.Signature)
- rets := sig.Results()
- if rets == nil || rets.Len() < 2 {
- continue
- }
-
- if rets.At(rets.Len()-1).Type() == types.Universe.Lookup("error").Type() {
- // Last return type is error. If the function also returns
- // errors in other positions, that's fine.
- continue
- }
- for i := rets.Len() - 2; i >= 0; i-- {
- if rets.At(i).Type() == types.Universe.Lookup("error").Type() {
- report.Report(pass, rets.At(i), "error should be returned as the last argument", report.ShortRange())
- continue fnLoop
- }
- }
- }
- return nil, nil
-}
-
-// CheckUnexportedReturn checks that exported functions on exported
-// types do not return unexported types.
-func CheckUnexportedReturn(pass *analysis.Pass) (interface{}, error) {
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- if fn.Synthetic != "" || fn.Parent() != nil {
- continue
- }
- if !ast.IsExported(fn.Name()) || code.IsMain(pass) || code.IsInTest(pass, fn) {
- continue
- }
- sig := fn.Type().(*types.Signature)
- if sig.Recv() != nil && !ast.IsExported(code.Dereference(sig.Recv().Type()).(*types.Named).Obj().Name()) {
- continue
- }
- res := sig.Results()
- for i := 0; i < res.Len(); i++ {
- if named, ok := code.DereferenceR(res.At(i).Type()).(*types.Named); ok &&
- !ast.IsExported(named.Obj().Name()) &&
- named != types.Universe.Lookup("error").Type() {
- report.Report(pass, fn, "should not return unexported type")
- }
- }
- }
- return nil, nil
-}
-
-func CheckReceiverNames(pass *analysis.Pass) (interface{}, error) {
- irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg
- for _, m := range irpkg.Members {
- if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() {
- ms := typeutil.IntuitiveMethodSet(T.Type(), nil)
- for _, sel := range ms {
- fn := sel.Obj().(*types.Func)
- recv := fn.Type().(*types.Signature).Recv()
- if code.Dereference(recv.Type()) != T.Type() {
- // skip embedded methods
- continue
- }
- if recv.Name() == "self" || recv.Name() == "this" {
- report.Report(pass, recv, `receiver name should be a reflection of its identity; don't use generic names such as "this" or "self"`, report.FilterGenerated())
- }
- if recv.Name() == "_" {
- report.Report(pass, recv, "receiver name should not be an underscore, omit the name if it is unused", report.FilterGenerated())
- }
- }
- }
- }
- return nil, nil
-}
-
-func CheckReceiverNamesIdentical(pass *analysis.Pass) (interface{}, error) {
- irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg
- for _, m := range irpkg.Members {
- names := map[string]int{}
-
- var firstFn *types.Func
- if T, ok := m.Object().(*types.TypeName); ok && !T.IsAlias() {
- ms := typeutil.IntuitiveMethodSet(T.Type(), nil)
- for _, sel := range ms {
- fn := sel.Obj().(*types.Func)
- recv := fn.Type().(*types.Signature).Recv()
- if code.IsGenerated(pass, recv.Pos()) {
- // Don't concern ourselves with methods in generated code
- continue
- }
- if code.Dereference(recv.Type()) != T.Type() {
- // skip embedded methods
- continue
- }
- if firstFn == nil {
- firstFn = fn
- }
- if recv.Name() != "" && recv.Name() != "_" {
- names[recv.Name()]++
- }
- }
- }
-
- if len(names) > 1 {
- var seen []string
- for name, count := range names {
- seen = append(seen, fmt.Sprintf("%dx %q", count, name))
- }
- sort.Strings(seen)
-
- report.Report(pass, firstFn, fmt.Sprintf("methods on the same type should have the same receiver name (seen %s)", strings.Join(seen, ", ")))
- }
- }
- return nil, nil
-}
-
-func CheckContextFirstArg(pass *analysis.Pass) (interface{}, error) {
- // TODO(dh): this check doesn't apply to test helpers. Example from the stdlib:
- // func helperCommandContext(t *testing.T, ctx context.Context, s ...string) (cmd *exec.Cmd) {
-fnLoop:
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- if fn.Synthetic != "" || fn.Parent() != nil {
- continue
- }
- params := fn.Signature.Params()
- if params.Len() < 2 {
- continue
- }
- if types.TypeString(params.At(0).Type(), nil) == "context.Context" {
- continue
- }
- for i := 1; i < params.Len(); i++ {
- param := params.At(i)
- if types.TypeString(param.Type(), nil) == "context.Context" {
- report.Report(pass, param, "context.Context should be the first argument of a function", report.ShortRange())
- continue fnLoop
- }
- }
- }
- return nil, nil
-}
-
-func CheckErrorStrings(pass *analysis.Pass) (interface{}, error) {
- objNames := map[*ir.Package]map[string]bool{}
- irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR).Pkg
- objNames[irpkg] = map[string]bool{}
- for _, m := range irpkg.Members {
- if typ, ok := m.(*ir.Type); ok {
- objNames[irpkg][typ.Name()] = true
- }
- }
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- objNames[fn.Package()][fn.Name()] = true
- }
-
- for _, fn := range pass.ResultOf[buildir.Analyzer].(*buildir.IR).SrcFuncs {
- if code.IsInTest(pass, fn) {
- // We don't care about malformed error messages in tests;
- // they're usually for direct human consumption, not part
- // of an API
- continue
- }
- for _, block := range fn.Blocks {
- instrLoop:
- for _, ins := range block.Instrs {
- call, ok := ins.(*ir.Call)
- if !ok {
- continue
- }
- if !code.IsCallToAny(call.Common(), "errors.New", "fmt.Errorf") {
- continue
- }
-
- k, ok := call.Common().Args[0].(*ir.Const)
- if !ok {
- continue
- }
-
- s := constant.StringVal(k.Value)
- if len(s) == 0 {
- continue
- }
- switch s[len(s)-1] {
- case '.', ':', '!', '\n':
- report.Report(pass, call, "error strings should not end with punctuation or a newline")
- }
- idx := strings.IndexByte(s, ' ')
- if idx == -1 {
- // single word error message, probably not a real
- // error but something used in tests or during
- // debugging
- continue
- }
- word := s[:idx]
- first, n := utf8.DecodeRuneInString(word)
- if !unicode.IsUpper(first) {
- continue
- }
- for _, c := range word[n:] {
- if unicode.IsUpper(c) {
- // Word is probably an initialism or
- // multi-word function name
- continue instrLoop
- }
- }
-
- word = strings.TrimRightFunc(word, func(r rune) bool { return unicode.IsPunct(r) })
- if objNames[fn.Package()][word] {
- // Word is probably the name of a function or type in this package
- continue
- }
- // First word in error starts with a capital
- // letter, and the word doesn't contain any other
- // capitals, making it unlikely to be an
- // initialism or multi-word function name.
- //
- // It could still be a proper noun, though.
-
- report.Report(pass, call, "error strings should not be capitalized")
- }
- }
- }
- return nil, nil
-}
-
-func CheckTimeNames(pass *analysis.Pass) (interface{}, error) {
- suffixes := []string{
- "Sec", "Secs", "Seconds",
- "Msec", "Msecs",
- "Milli", "Millis", "Milliseconds",
- "Usec", "Usecs", "Microseconds",
- "MS", "Ms",
- }
- fn := func(names []*ast.Ident) {
- for _, name := range names {
- if _, ok := pass.TypesInfo.Defs[name]; !ok {
- continue
- }
- T := pass.TypesInfo.TypeOf(name)
- if !code.IsType(T, "time.Duration") && !code.IsType(T, "*time.Duration") {
- continue
- }
- for _, suffix := range suffixes {
- if strings.HasSuffix(name.Name, suffix) {
- report.Report(pass, name, fmt.Sprintf("var %s is of type %v; don't use unit-specific suffix %q", name.Name, T, suffix))
- break
- }
- }
- }
- }
-
- fn2 := func(node ast.Node) {
- switch node := node.(type) {
- case *ast.ValueSpec:
- fn(node.Names)
- case *ast.FieldList:
- for _, field := range node.List {
- fn(field.Names)
- }
- case *ast.AssignStmt:
- if node.Tok != token.DEFINE {
- break
- }
- var names []*ast.Ident
- for _, lhs := range node.Lhs {
- if lhs, ok := lhs.(*ast.Ident); ok {
- names = append(names, lhs)
- }
- }
- fn(names)
- }
- }
-
- code.Preorder(pass, fn2, (*ast.ValueSpec)(nil), (*ast.FieldList)(nil), (*ast.AssignStmt)(nil))
- return nil, nil
-}
-
-func CheckErrorVarNames(pass *analysis.Pass) (interface{}, error) {
- for _, f := range pass.Files {
- for _, decl := range f.Decls {
- gen, ok := decl.(*ast.GenDecl)
- if !ok || gen.Tok != token.VAR {
- continue
- }
- for _, spec := range gen.Specs {
- spec := spec.(*ast.ValueSpec)
- if len(spec.Names) != len(spec.Values) {
- continue
- }
-
- for i, name := range spec.Names {
- val := spec.Values[i]
- if !code.IsCallToAnyAST(pass, val, "errors.New", "fmt.Errorf") {
- continue
- }
-
- if pass.Pkg.Path() == "net/http" && strings.HasPrefix(name.Name, "http2err") {
- // special case for internal variable names of
- // bundled HTTP 2 code in net/http
- continue
- }
- prefix := "err"
- if name.IsExported() {
- prefix = "Err"
- }
- if !strings.HasPrefix(name.Name, prefix) {
- report.Report(pass, name, fmt.Sprintf("error var %s should have name of the form %sFoo", name.Name, prefix))
- }
- }
- }
- }
- }
- return nil, nil
-}
-
-var httpStatusCodes = map[int]string{
- 100: "StatusContinue",
- 101: "StatusSwitchingProtocols",
- 102: "StatusProcessing",
- 200: "StatusOK",
- 201: "StatusCreated",
- 202: "StatusAccepted",
- 203: "StatusNonAuthoritativeInfo",
- 204: "StatusNoContent",
- 205: "StatusResetContent",
- 206: "StatusPartialContent",
- 207: "StatusMultiStatus",
- 208: "StatusAlreadyReported",
- 226: "StatusIMUsed",
- 300: "StatusMultipleChoices",
- 301: "StatusMovedPermanently",
- 302: "StatusFound",
- 303: "StatusSeeOther",
- 304: "StatusNotModified",
- 305: "StatusUseProxy",
- 307: "StatusTemporaryRedirect",
- 308: "StatusPermanentRedirect",
- 400: "StatusBadRequest",
- 401: "StatusUnauthorized",
- 402: "StatusPaymentRequired",
- 403: "StatusForbidden",
- 404: "StatusNotFound",
- 405: "StatusMethodNotAllowed",
- 406: "StatusNotAcceptable",
- 407: "StatusProxyAuthRequired",
- 408: "StatusRequestTimeout",
- 409: "StatusConflict",
- 410: "StatusGone",
- 411: "StatusLengthRequired",
- 412: "StatusPreconditionFailed",
- 413: "StatusRequestEntityTooLarge",
- 414: "StatusRequestURITooLong",
- 415: "StatusUnsupportedMediaType",
- 416: "StatusRequestedRangeNotSatisfiable",
- 417: "StatusExpectationFailed",
- 418: "StatusTeapot",
- 422: "StatusUnprocessableEntity",
- 423: "StatusLocked",
- 424: "StatusFailedDependency",
- 426: "StatusUpgradeRequired",
- 428: "StatusPreconditionRequired",
- 429: "StatusTooManyRequests",
- 431: "StatusRequestHeaderFieldsTooLarge",
- 451: "StatusUnavailableForLegalReasons",
- 500: "StatusInternalServerError",
- 501: "StatusNotImplemented",
- 502: "StatusBadGateway",
- 503: "StatusServiceUnavailable",
- 504: "StatusGatewayTimeout",
- 505: "StatusHTTPVersionNotSupported",
- 506: "StatusVariantAlsoNegotiates",
- 507: "StatusInsufficientStorage",
- 508: "StatusLoopDetected",
- 510: "StatusNotExtended",
- 511: "StatusNetworkAuthenticationRequired",
-}
-
-func CheckHTTPStatusCodes(pass *analysis.Pass) (interface{}, error) {
- whitelist := map[string]bool{}
- for _, code := range config.For(pass).HTTPStatusCodeWhitelist {
- whitelist[code] = true
- }
- fn := func(node ast.Node) {
- call := node.(*ast.CallExpr)
-
- var arg int
- switch code.CallNameAST(pass, call) {
- case "net/http.Error":
- arg = 2
- case "net/http.Redirect":
- arg = 3
- case "net/http.StatusText":
- arg = 0
- case "net/http.RedirectHandler":
- arg = 1
- default:
- return
- }
- lit, ok := call.Args[arg].(*ast.BasicLit)
- if !ok {
- return
- }
- if whitelist[lit.Value] {
- return
- }
-
- n, err := strconv.Atoi(lit.Value)
- if err != nil {
- return
- }
- s, ok := httpStatusCodes[n]
- if !ok {
- return
- }
- report.Report(pass, lit, fmt.Sprintf("should use constant http.%s instead of numeric literal %d", s, n),
- report.FilterGenerated(),
- report.Fixes(edit.Fix(fmt.Sprintf("use http.%s instead of %d", s, n), edit.ReplaceWithString(pass.Fset, lit, "http."+s))))
- }
- code.Preorder(pass, fn, (*ast.CallExpr)(nil))
- return nil, nil
-}
-
-func CheckDefaultCaseOrder(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- stmt := node.(*ast.SwitchStmt)
- list := stmt.Body.List
- for i, c := range list {
- if c.(*ast.CaseClause).List == nil && i != 0 && i != len(list)-1 {
- report.Report(pass, c, "default case should be first or last in switch statement", report.FilterGenerated())
- break
- }
- }
- }
- code.Preorder(pass, fn, (*ast.SwitchStmt)(nil))
- return nil, nil
-}
-
-var (
- checkYodaConditionsQ = pattern.MustParse(`(BinaryExpr left@(BasicLit _ _) tok@(Or "==" "!=") right@(Not (BasicLit _ _)))`)
- checkYodaConditionsR = pattern.MustParse(`(BinaryExpr right tok left)`)
-)
-
-func CheckYodaConditions(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if _, edits, ok := MatchAndEdit(pass, checkYodaConditionsQ, checkYodaConditionsR, node); ok {
- report.Report(pass, node, "don't use Yoda conditions",
- report.FilterGenerated(),
- report.Fixes(edit.Fix("un-Yoda-fy", edits...)))
- }
- }
- code.Preorder(pass, fn, (*ast.BinaryExpr)(nil))
- return nil, nil
-}
-
-func CheckInvisibleCharacters(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- lit := node.(*ast.BasicLit)
- if lit.Kind != token.STRING {
- return
- }
-
- type invalid struct {
- r rune
- off int
- }
- var invalids []invalid
- hasFormat := false
- hasControl := false
- for off, r := range lit.Value {
- if unicode.Is(unicode.Cf, r) {
- invalids = append(invalids, invalid{r, off})
- hasFormat = true
- } else if unicode.Is(unicode.Cc, r) && r != '\n' && r != '\t' && r != '\r' {
- invalids = append(invalids, invalid{r, off})
- hasControl = true
- }
- }
-
- switch len(invalids) {
- case 0:
- return
- case 1:
- var kind string
- if hasFormat {
- kind = "format"
- } else if hasControl {
- kind = "control"
- } else {
- panic("unreachable")
- }
-
- r := invalids[0]
- msg := fmt.Sprintf("string literal contains the Unicode %s character %U, consider using the %q escape sequence instead", kind, r.r, r.r)
-
- replacement := strconv.QuoteRune(r.r)
- replacement = replacement[1 : len(replacement)-1]
- edit := analysis.SuggestedFix{
- Message: fmt.Sprintf("replace %s character %U with %q", kind, r.r, r.r),
- TextEdits: []analysis.TextEdit{{
- Pos: lit.Pos() + token.Pos(r.off),
- End: lit.Pos() + token.Pos(r.off) + token.Pos(utf8.RuneLen(r.r)),
- NewText: []byte(replacement),
- }},
- }
- delete := analysis.SuggestedFix{
- Message: fmt.Sprintf("delete %s character %U", kind, r),
- TextEdits: []analysis.TextEdit{{
- Pos: lit.Pos() + token.Pos(r.off),
- End: lit.Pos() + token.Pos(r.off) + token.Pos(utf8.RuneLen(r.r)),
- }},
- }
- report.Report(pass, lit, msg, report.Fixes(edit, delete))
- default:
- var kind string
- if hasFormat && hasControl {
- kind = "format and control"
- } else if hasFormat {
- kind = "format"
- } else if hasControl {
- kind = "control"
- } else {
- panic("unreachable")
- }
-
- msg := fmt.Sprintf("string literal contains Unicode %s characters, consider using escape sequences instead", kind)
- var edits []analysis.TextEdit
- var deletions []analysis.TextEdit
- for _, r := range invalids {
- replacement := strconv.QuoteRune(r.r)
- replacement = replacement[1 : len(replacement)-1]
- edits = append(edits, analysis.TextEdit{
- Pos: lit.Pos() + token.Pos(r.off),
- End: lit.Pos() + token.Pos(r.off) + token.Pos(utf8.RuneLen(r.r)),
- NewText: []byte(replacement),
- })
- deletions = append(deletions, analysis.TextEdit{
- Pos: lit.Pos() + token.Pos(r.off),
- End: lit.Pos() + token.Pos(r.off) + token.Pos(utf8.RuneLen(r.r)),
- })
- }
- edit := analysis.SuggestedFix{
- Message: fmt.Sprintf("replace all %s characters with escape sequences", kind),
- TextEdits: edits,
- }
- delete := analysis.SuggestedFix{
- Message: fmt.Sprintf("delete all %s characters", kind),
- TextEdits: deletions,
- }
- report.Report(pass, lit, msg, report.Fixes(edit, delete))
- }
- }
- code.Preorder(pass, fn, (*ast.BasicLit)(nil))
- return nil, nil
-}
-
-func CheckExportedFunctionDocs(pass *analysis.Pass) (interface{}, error) {
- fn := func(node ast.Node) {
- if code.IsInTest(pass, node) {
- return
- }
-
- decl := node.(*ast.FuncDecl)
- if decl.Doc == nil {
- return
- }
- if !ast.IsExported(decl.Name.Name) {
- return
- }
- kind := "function"
- if decl.Recv != nil {
- kind = "method"
- switch T := decl.Recv.List[0].Type.(type) {
- case *ast.StarExpr:
- if !ast.IsExported(T.X.(*ast.Ident).Name) {
- return
- }
- case *ast.Ident:
- if !ast.IsExported(T.Name) {
- return
- }
- default:
- ExhaustiveTypeSwitch(T)
- }
- }
- prefix := decl.Name.Name + " "
- if !strings.HasPrefix(decl.Doc.Text(), prefix) {
- report.Report(pass, decl.Doc, fmt.Sprintf(`comment on exported %s %s should be of the form "%s..."`, kind, decl.Name.Name, prefix), report.FilterGenerated())
- }
- }
-
- code.Preorder(pass, fn, (*ast.FuncDecl)(nil))
- return nil, nil
-}
-
-func CheckExportedTypeDocs(pass *analysis.Pass) (interface{}, error) {
- var genDecl *ast.GenDecl
- fn := func(node ast.Node, push bool) bool {
- if !push {
- genDecl = nil
- return false
- }
- if code.IsInTest(pass, node) {
- return false
- }
-
- switch node := node.(type) {
- case *ast.GenDecl:
- if node.Tok == token.IMPORT {
- return false
- }
- genDecl = node
- return true
- case *ast.TypeSpec:
- if !ast.IsExported(node.Name.Name) {
- return false
- }
-
- doc := node.Doc
- if doc == nil {
- if len(genDecl.Specs) != 1 {
- // more than one spec in the GenDecl, don't validate the
- // docstring
- return false
- }
- if genDecl.Lparen.IsValid() {
- // 'type ( T )' is weird, don't guess the user's intention
- return false
- }
- doc = genDecl.Doc
- if doc == nil {
- return false
- }
- }
-
- s := doc.Text()
- articles := [...]string{"A", "An", "The"}
- for _, a := range articles {
- if strings.HasPrefix(s, a+" ") {
- s = s[len(a)+1:]
- break
- }
- }
- if !strings.HasPrefix(s, node.Name.Name+" ") {
- report.Report(pass, doc, fmt.Sprintf(`comment on exported type %s should be of the form "%s ..." (with optional leading article)`, node.Name.Name, node.Name.Name), report.FilterGenerated())
- }
- return false
- case *ast.FuncLit, *ast.FuncDecl:
- return false
- default:
- ExhaustiveTypeSwitch(node)
- return false
- }
- }
-
- pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Nodes([]ast.Node{(*ast.GenDecl)(nil), (*ast.TypeSpec)(nil), (*ast.FuncLit)(nil), (*ast.FuncDecl)(nil)}, fn)
- return nil, nil
-}
-
-func CheckExportedVarDocs(pass *analysis.Pass) (interface{}, error) {
- var genDecl *ast.GenDecl
- fn := func(node ast.Node, push bool) bool {
- if !push {
- genDecl = nil
- return false
- }
- if code.IsInTest(pass, node) {
- return false
- }
-
- switch node := node.(type) {
- case *ast.GenDecl:
- if node.Tok == token.IMPORT {
- return false
- }
- genDecl = node
- return true
- case *ast.ValueSpec:
- if genDecl.Lparen.IsValid() || len(node.Names) > 1 {
- // Don't try to guess the user's intention
- return false
- }
- name := node.Names[0].Name
- if !ast.IsExported(name) {
- return false
- }
- if genDecl.Doc == nil {
- return false
- }
- prefix := name + " "
- if !strings.HasPrefix(genDecl.Doc.Text(), prefix) {
- kind := "var"
- if genDecl.Tok == token.CONST {
- kind = "const"
- }
- report.Report(pass, genDecl.Doc, fmt.Sprintf(`comment on exported %s %s should be of the form "%s..."`, kind, name, prefix), report.FilterGenerated())
- }
- return false
- case *ast.FuncLit, *ast.FuncDecl:
- return false
- default:
- ExhaustiveTypeSwitch(node)
- return false
- }
- }
-
- pass.ResultOf[inspect.Analyzer].(*inspector.Inspector).Nodes([]ast.Node{(*ast.GenDecl)(nil), (*ast.ValueSpec)(nil), (*ast.FuncLit)(nil), (*ast.FuncDecl)(nil)}, fn)
- return nil, nil
-}
diff --git a/vendor/honnef.co/go/tools/stylecheck/names.go b/vendor/honnef.co/go/tools/stylecheck/names.go
deleted file mode 100644
index ffc689e98b8..00000000000
--- a/vendor/honnef.co/go/tools/stylecheck/names.go
+++ /dev/null
@@ -1,276 +0,0 @@
-// Copyright (c) 2013 The Go Authors. All rights reserved.
-// Copyright (c) 2018 Dominik Honnef. All rights reserved.
-
-package stylecheck
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "strings"
- "unicode"
-
- "golang.org/x/tools/go/analysis"
- "honnef.co/go/tools/code"
- "honnef.co/go/tools/config"
- "honnef.co/go/tools/report"
-)
-
-// knownNameExceptions is a set of names that are known to be exempt from naming checks.
-// This is usually because they are constrained by having to match names in the
-// standard library.
-var knownNameExceptions = map[string]bool{
- "LastInsertId": true, // must match database/sql
- "kWh": true,
-}
-
-func CheckNames(pass *analysis.Pass) (interface{}, error) {
- // A large part of this function is copied from
- // github.com/golang/lint, Copyright (c) 2013 The Go Authors,
- // licensed under the BSD 3-clause license.
-
- allCaps := func(s string) bool {
- for _, r := range s {
- if !((r >= 'A' && r <= 'Z') || (r >= '0' && r <= '9') || r == '_') {
- return false
- }
- }
- return true
- }
-
- check := func(id *ast.Ident, thing string, initialisms map[string]bool) {
- if id.Name == "_" {
- return
- }
- if knownNameExceptions[id.Name] {
- return
- }
-
- // Handle two common styles from other languages that don't belong in Go.
- if len(id.Name) >= 5 && allCaps(id.Name) && strings.Contains(id.Name, "_") {
- report.Report(pass, id, "should not use ALL_CAPS in Go names; use CamelCase instead", report.FilterGenerated())
- return
- }
-
- should := lintName(id.Name, initialisms)
- if id.Name == should {
- return
- }
-
- if len(id.Name) > 2 && strings.Contains(id.Name[1:len(id.Name)-1], "_") {
- report.Report(pass, id, fmt.Sprintf("should not use underscores in Go names; %s %s should be %s", thing, id.Name, should), report.FilterGenerated())
- return
- }
- report.Report(pass, id, fmt.Sprintf("%s %s should be %s", thing, id.Name, should), report.FilterGenerated())
- }
- checkList := func(fl *ast.FieldList, thing string, initialisms map[string]bool) {
- if fl == nil {
- return
- }
- for _, f := range fl.List {
- for _, id := range f.Names {
- check(id, thing, initialisms)
- }
- }
- }
-
- il := config.For(pass).Initialisms
- initialisms := make(map[string]bool, len(il))
- for _, word := range il {
- initialisms[word] = true
- }
- for _, f := range pass.Files {
- // Package names need slightly different handling than other names.
- if !strings.HasSuffix(f.Name.Name, "_test") && strings.Contains(f.Name.Name, "_") {
- report.Report(pass, f, "should not use underscores in package names", report.FilterGenerated())
- }
- if strings.IndexFunc(f.Name.Name, unicode.IsUpper) != -1 {
- report.Report(pass, f, fmt.Sprintf("should not use MixedCaps in package name; %s should be %s", f.Name.Name, strings.ToLower(f.Name.Name)), report.FilterGenerated())
- }
- }
-
- fn := func(node ast.Node) {
- switch v := node.(type) {
- case *ast.AssignStmt:
- if v.Tok != token.DEFINE {
- return
- }
- for _, exp := range v.Lhs {
- if id, ok := exp.(*ast.Ident); ok {
- check(id, "var", initialisms)
- }
- }
- case *ast.FuncDecl:
- // Functions with no body are defined elsewhere (in
- // assembly, or via go:linkname). These are likely to
- // be something very low level (such as the runtime),
- // where our rules don't apply.
- if v.Body == nil {
- return
- }
-
- if code.IsInTest(pass, v) && (strings.HasPrefix(v.Name.Name, "Example") || strings.HasPrefix(v.Name.Name, "Test") || strings.HasPrefix(v.Name.Name, "Benchmark")) {
- return
- }
-
- thing := "func"
- if v.Recv != nil {
- thing = "method"
- }
-
- if !isTechnicallyExported(v) {
- check(v.Name, thing, initialisms)
- }
-
- checkList(v.Type.Params, thing+" parameter", initialisms)
- checkList(v.Type.Results, thing+" result", initialisms)
- case *ast.GenDecl:
- if v.Tok == token.IMPORT {
- return
- }
- var thing string
- switch v.Tok {
- case token.CONST:
- thing = "const"
- case token.TYPE:
- thing = "type"
- case token.VAR:
- thing = "var"
- }
- for _, spec := range v.Specs {
- switch s := spec.(type) {
- case *ast.TypeSpec:
- check(s.Name, thing, initialisms)
- case *ast.ValueSpec:
- for _, id := range s.Names {
- check(id, thing, initialisms)
- }
- }
- }
- case *ast.InterfaceType:
- // Do not check interface method names.
- // They are often constrained by the method names of concrete types.
- for _, x := range v.Methods.List {
- ft, ok := x.Type.(*ast.FuncType)
- if !ok { // might be an embedded interface name
- continue
- }
- checkList(ft.Params, "interface method parameter", initialisms)
- checkList(ft.Results, "interface method result", initialisms)
- }
- case *ast.RangeStmt:
- if v.Tok == token.ASSIGN {
- return
- }
- if id, ok := v.Key.(*ast.Ident); ok {
- check(id, "range var", initialisms)
- }
- if id, ok := v.Value.(*ast.Ident); ok {
- check(id, "range var", initialisms)
- }
- case *ast.StructType:
- for _, f := range v.Fields.List {
- for _, id := range f.Names {
- check(id, "struct field", initialisms)
- }
- }
- }
- }
-
- needle := []ast.Node{
- (*ast.AssignStmt)(nil),
- (*ast.FuncDecl)(nil),
- (*ast.GenDecl)(nil),
- (*ast.InterfaceType)(nil),
- (*ast.RangeStmt)(nil),
- (*ast.StructType)(nil),
- }
-
- code.Preorder(pass, fn, needle...)
- return nil, nil
-}
-
-// lintName returns a different name if it should be different.
-func lintName(name string, initialisms map[string]bool) (should string) {
- // A large part of this function is copied from
- // github.com/golang/lint, Copyright (c) 2013 The Go Authors,
- // licensed under the BSD 3-clause license.
-
- // Fast path for simple cases: "_" and all lowercase.
- if name == "_" {
- return name
- }
- if strings.IndexFunc(name, func(r rune) bool { return !unicode.IsLower(r) }) == -1 {
- return name
- }
-
- // Split camelCase at any lower->upper transition, and split on underscores.
- // Check each word for common initialisms.
- runes := []rune(name)
- w, i := 0, 0 // index of start of word, scan
- for i+1 <= len(runes) {
- eow := false // whether we hit the end of a word
- if i+1 == len(runes) {
- eow = true
- } else if runes[i+1] == '_' && i+1 != len(runes)-1 {
- // underscore; shift the remainder forward over any run of underscores
- eow = true
- n := 1
- for i+n+1 < len(runes) && runes[i+n+1] == '_' {
- n++
- }
-
- // Leave at most one underscore if the underscore is between two digits
- if i+n+1 < len(runes) && unicode.IsDigit(runes[i]) && unicode.IsDigit(runes[i+n+1]) {
- n--
- }
-
- copy(runes[i+1:], runes[i+n+1:])
- runes = runes[:len(runes)-n]
- } else if unicode.IsLower(runes[i]) && !unicode.IsLower(runes[i+1]) {
- // lower->non-lower
- eow = true
- }
- i++
- if !eow {
- continue
- }
-
- // [w,i) is a word.
- word := string(runes[w:i])
- if u := strings.ToUpper(word); initialisms[u] {
- // Keep consistent case, which is lowercase only at the start.
- if w == 0 && unicode.IsLower(runes[w]) {
- u = strings.ToLower(u)
- }
- // All the common initialisms are ASCII,
- // so we can replace the bytes exactly.
- // TODO(dh): this won't be true once we allow custom initialisms
- copy(runes[w:], []rune(u))
- } else if w > 0 && strings.ToLower(word) == word {
- // already all lowercase, and not the first word, so uppercase the first character.
- runes[w] = unicode.ToUpper(runes[w])
- }
- w = i
- }
- return string(runes)
-}
-
-func isTechnicallyExported(f *ast.FuncDecl) bool {
- if f.Recv != nil || f.Doc == nil {
- return false
- }
-
- const export = "//export "
- const linkname = "//go:linkname "
- for _, c := range f.Doc.List {
- if strings.HasPrefix(c.Text, export) && len(c.Text) == len(export)+len(f.Name.Name) && c.Text[len(export):] == f.Name.Name {
- return true
- }
-
- if strings.HasPrefix(c.Text, linkname) {
- return true
- }
- }
- return false
-}
diff --git a/vendor/honnef.co/go/tools/unused/edge.go b/vendor/honnef.co/go/tools/unused/edge.go
deleted file mode 100644
index 02e0d09cf2a..00000000000
--- a/vendor/honnef.co/go/tools/unused/edge.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package unused
-
-//go:generate stringer -type edgeKind
-type edgeKind uint64
-
-func (e edgeKind) is(o edgeKind) bool {
- return e&o != 0
-}
-
-const (
- edgeAlias edgeKind = 1 << iota
- edgeBlankField
- edgeAnonymousStruct
- edgeCgoExported
- edgeConstGroup
- edgeElementType
- edgeEmbeddedInterface
- edgeExportedConstant
- edgeExportedField
- edgeExportedFunction
- edgeExportedMethod
- edgeExportedType
- edgeExportedVariable
- edgeExtendsExportedFields
- edgeExtendsExportedMethodSet
- edgeFieldAccess
- edgeFunctionArgument
- edgeFunctionResult
- edgeFunctionSignature
- edgeImplements
- edgeInstructionOperand
- edgeInterfaceCall
- edgeInterfaceMethod
- edgeKeyType
- edgeLinkname
- edgeMainFunction
- edgeNamedType
- edgeNetRPCRegister
- edgeNoCopySentinel
- edgeProvidesMethod
- edgeReceiver
- edgeRuntimeFunction
- edgeSignature
- edgeStructConversion
- edgeTestSink
- edgeTupleElement
- edgeType
- edgeTypeName
- edgeUnderlyingType
- edgePointerType
- edgeUnsafeConversion
- edgeUsedConstant
- edgeVarDecl
-)
diff --git a/vendor/honnef.co/go/tools/unused/edgekind_string.go b/vendor/honnef.co/go/tools/unused/edgekind_string.go
deleted file mode 100644
index 7629636cf13..00000000000
--- a/vendor/honnef.co/go/tools/unused/edgekind_string.go
+++ /dev/null
@@ -1,109 +0,0 @@
-// Code generated by "stringer -type edgeKind"; DO NOT EDIT.
-
-package unused
-
-import "strconv"
-
-func _() {
- // An "invalid array index" compiler error signifies that the constant values have changed.
- // Re-run the stringer command to generate them again.
- var x [1]struct{}
- _ = x[edgeAlias-1]
- _ = x[edgeBlankField-2]
- _ = x[edgeAnonymousStruct-4]
- _ = x[edgeCgoExported-8]
- _ = x[edgeConstGroup-16]
- _ = x[edgeElementType-32]
- _ = x[edgeEmbeddedInterface-64]
- _ = x[edgeExportedConstant-128]
- _ = x[edgeExportedField-256]
- _ = x[edgeExportedFunction-512]
- _ = x[edgeExportedMethod-1024]
- _ = x[edgeExportedType-2048]
- _ = x[edgeExportedVariable-4096]
- _ = x[edgeExtendsExportedFields-8192]
- _ = x[edgeExtendsExportedMethodSet-16384]
- _ = x[edgeFieldAccess-32768]
- _ = x[edgeFunctionArgument-65536]
- _ = x[edgeFunctionResult-131072]
- _ = x[edgeFunctionSignature-262144]
- _ = x[edgeImplements-524288]
- _ = x[edgeInstructionOperand-1048576]
- _ = x[edgeInterfaceCall-2097152]
- _ = x[edgeInterfaceMethod-4194304]
- _ = x[edgeKeyType-8388608]
- _ = x[edgeLinkname-16777216]
- _ = x[edgeMainFunction-33554432]
- _ = x[edgeNamedType-67108864]
- _ = x[edgeNetRPCRegister-134217728]
- _ = x[edgeNoCopySentinel-268435456]
- _ = x[edgeProvidesMethod-536870912]
- _ = x[edgeReceiver-1073741824]
- _ = x[edgeRuntimeFunction-2147483648]
- _ = x[edgeSignature-4294967296]
- _ = x[edgeStructConversion-8589934592]
- _ = x[edgeTestSink-17179869184]
- _ = x[edgeTupleElement-34359738368]
- _ = x[edgeType-68719476736]
- _ = x[edgeTypeName-137438953472]
- _ = x[edgeUnderlyingType-274877906944]
- _ = x[edgePointerType-549755813888]
- _ = x[edgeUnsafeConversion-1099511627776]
- _ = x[edgeUsedConstant-2199023255552]
- _ = x[edgeVarDecl-4398046511104]
-}
-
-const _edgeKind_name = "edgeAliasedgeBlankFieldedgeAnonymousStructedgeCgoExportededgeConstGroupedgeElementTypeedgeEmbeddedInterfaceedgeExportedConstantedgeExportedFieldedgeExportedFunctionedgeExportedMethodedgeExportedTypeedgeExportedVariableedgeExtendsExportedFieldsedgeExtendsExportedMethodSetedgeFieldAccessedgeFunctionArgumentedgeFunctionResultedgeFunctionSignatureedgeImplementsedgeInstructionOperandedgeInterfaceCalledgeInterfaceMethodedgeKeyTypeedgeLinknameedgeMainFunctionedgeNamedTypeedgeNetRPCRegisteredgeNoCopySentineledgeProvidesMethodedgeReceiveredgeRuntimeFunctionedgeSignatureedgeStructConversionedgeTestSinkedgeTupleElementedgeTypeedgeTypeNameedgeUnderlyingTypeedgePointerTypeedgeUnsafeConversionedgeUsedConstantedgeVarDecl"
-
-var _edgeKind_map = map[edgeKind]string{
- 1: _edgeKind_name[0:9],
- 2: _edgeKind_name[9:23],
- 4: _edgeKind_name[23:42],
- 8: _edgeKind_name[42:57],
- 16: _edgeKind_name[57:71],
- 32: _edgeKind_name[71:86],
- 64: _edgeKind_name[86:107],
- 128: _edgeKind_name[107:127],
- 256: _edgeKind_name[127:144],
- 512: _edgeKind_name[144:164],
- 1024: _edgeKind_name[164:182],
- 2048: _edgeKind_name[182:198],
- 4096: _edgeKind_name[198:218],
- 8192: _edgeKind_name[218:243],
- 16384: _edgeKind_name[243:271],
- 32768: _edgeKind_name[271:286],
- 65536: _edgeKind_name[286:306],
- 131072: _edgeKind_name[306:324],
- 262144: _edgeKind_name[324:345],
- 524288: _edgeKind_name[345:359],
- 1048576: _edgeKind_name[359:381],
- 2097152: _edgeKind_name[381:398],
- 4194304: _edgeKind_name[398:417],
- 8388608: _edgeKind_name[417:428],
- 16777216: _edgeKind_name[428:440],
- 33554432: _edgeKind_name[440:456],
- 67108864: _edgeKind_name[456:469],
- 134217728: _edgeKind_name[469:487],
- 268435456: _edgeKind_name[487:505],
- 536870912: _edgeKind_name[505:523],
- 1073741824: _edgeKind_name[523:535],
- 2147483648: _edgeKind_name[535:554],
- 4294967296: _edgeKind_name[554:567],
- 8589934592: _edgeKind_name[567:587],
- 17179869184: _edgeKind_name[587:599],
- 34359738368: _edgeKind_name[599:615],
- 68719476736: _edgeKind_name[615:623],
- 137438953472: _edgeKind_name[623:635],
- 274877906944: _edgeKind_name[635:653],
- 549755813888: _edgeKind_name[653:668],
- 1099511627776: _edgeKind_name[668:688],
- 2199023255552: _edgeKind_name[688:704],
- 4398046511104: _edgeKind_name[704:715],
-}
-
-func (i edgeKind) String() string {
- if str, ok := _edgeKind_map[i]; ok {
- return str
- }
- return "edgeKind(" + strconv.FormatInt(int64(i), 10) + ")"
-}
diff --git a/vendor/honnef.co/go/tools/unused/implements.go b/vendor/honnef.co/go/tools/unused/implements.go
deleted file mode 100644
index 835baac6925..00000000000
--- a/vendor/honnef.co/go/tools/unused/implements.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package unused
-
-import "go/types"
-
-// lookupMethod returns the index of and method with matching package and name, or (-1, nil).
-func lookupMethod(T *types.Interface, pkg *types.Package, name string) (int, *types.Func) {
- if name != "_" {
- for i := 0; i < T.NumMethods(); i++ {
- m := T.Method(i)
- if sameId(m, pkg, name) {
- return i, m
- }
- }
- }
- return -1, nil
-}
-
-func sameId(obj types.Object, pkg *types.Package, name string) bool {
- // spec:
- // "Two identifiers are different if they are spelled differently,
- // or if they appear in different packages and are not exported.
- // Otherwise, they are the same."
- if name != obj.Name() {
- return false
- }
- // obj.Name == name
- if obj.Exported() {
- return true
- }
- // not exported, so packages must be the same (pkg == nil for
- // fields in Universe scope; this can only happen for types
- // introduced via Eval)
- if pkg == nil || obj.Pkg() == nil {
- return pkg == obj.Pkg()
- }
- // pkg != nil && obj.pkg != nil
- return pkg.Path() == obj.Pkg().Path()
-}
-
-func (g *Graph) implements(V types.Type, T *types.Interface, msV *types.MethodSet) ([]*types.Selection, bool) {
- // fast path for common case
- if T.Empty() {
- return nil, true
- }
-
- if ityp, _ := V.Underlying().(*types.Interface); ityp != nil {
- // TODO(dh): is this code reachable?
- for i := 0; i < T.NumMethods(); i++ {
- m := T.Method(i)
- _, obj := lookupMethod(ityp, m.Pkg(), m.Name())
- switch {
- case obj == nil:
- return nil, false
- case !types.Identical(obj.Type(), m.Type()):
- return nil, false
- }
- }
- return nil, true
- }
-
- // A concrete type implements T if it implements all methods of T.
- var sels []*types.Selection
- for i := 0; i < T.NumMethods(); i++ {
- m := T.Method(i)
- sel := msV.Lookup(m.Pkg(), m.Name())
- if sel == nil {
- return nil, false
- }
-
- f, _ := sel.Obj().(*types.Func)
- if f == nil {
- return nil, false
- }
-
- if !types.Identical(f.Type(), m.Type()) {
- return nil, false
- }
-
- sels = append(sels, sel)
- }
- return sels, true
-}
diff --git a/vendor/honnef.co/go/tools/unused/unused.go b/vendor/honnef.co/go/tools/unused/unused.go
deleted file mode 100644
index 0df5fc8ff82..00000000000
--- a/vendor/honnef.co/go/tools/unused/unused.go
+++ /dev/null
@@ -1,1978 +0,0 @@
-package unused
-
-import (
- "fmt"
- "go/ast"
- "go/token"
- "go/types"
- "io"
- "strings"
- "sync"
- "sync/atomic"
-
- "golang.org/x/tools/go/analysis"
- "honnef.co/go/tools/code"
- "honnef.co/go/tools/go/types/typeutil"
- "honnef.co/go/tools/internal/passes/buildir"
- "honnef.co/go/tools/ir"
- "honnef.co/go/tools/lint"
-)
-
-// The graph we construct omits nodes along a path that do not
-// contribute any new information to the solution. For example, the
-// full graph for a function with a receiver would be Func ->
-// Signature -> Var -> Type. However, since signatures cannot be
-// unused, and receivers are always considered used, we can compact
-// the graph down to Func -> Type. This makes the graph smaller, but
-// harder to debug.
-
-// TODO(dh): conversions between structs mark fields as used, but the
-// conversion itself isn't part of that subgraph. even if the function
-// containing the conversion is unused, the fields will be marked as
-// used.
-
-// TODO(dh): we cannot observe function calls in assembly files.
-
-/*
-
-- packages use:
- - (1.1) exported named types (unless in package main)
- - (1.2) exported functions (unless in package main)
- - (1.3) exported variables (unless in package main)
- - (1.4) exported constants (unless in package main)
- - (1.5) init functions
- - (1.6) functions exported to cgo
- - (1.7) the main function iff in the main package
- - (1.8) symbols linked via go:linkname
-
-- named types use:
- - (2.1) exported methods
- - (2.2) the type they're based on
- - (2.3) all their aliases. we can't easily track uses of aliases
- because go/types turns them into uses of the aliased types. assume
- that if a type is used, so are all of its aliases.
- - (2.4) the pointer type. this aids with eagerly implementing
- interfaces. if a method that implements an interface is defined on
- a pointer receiver, and the pointer type is never used, but the
- named type is, then we still want to mark the method as used.
-
-- variables and constants use:
- - their types
-
-- functions use:
- - (4.1) all their arguments, return parameters and receivers
- - (4.2) anonymous functions defined beneath them
- - (4.3) closures and bound methods.
- this implements a simplified model where a function is used merely by being referenced, even if it is never called.
- that way we don't have to keep track of closures escaping functions.
- - (4.4) functions they return. we assume that someone else will call the returned function
- - (4.5) functions/interface methods they call
- - types they instantiate or convert to
- - (4.7) fields they access
- - (4.8) types of all instructions
- - (4.9) package-level variables they assign to iff in tests (sinks for benchmarks)
-
-- conversions use:
- - (5.1) when converting between two equivalent structs, the fields in
- either struct use each other. the fields are relevant for the
- conversion, but only if the fields are also accessed outside the
- conversion.
- - (5.2) when converting to or from unsafe.Pointer, mark all fields as used.
-
-- structs use:
- - (6.1) fields of type NoCopy sentinel
- - (6.2) exported fields
- - (6.3) embedded fields that help implement interfaces (either fully implements it, or contributes required methods) (recursively)
- - (6.4) embedded fields that have exported methods (recursively)
- - (6.5) embedded structs that have exported fields (recursively)
-
-- (7.1) field accesses use fields
-- (7.2) fields use their types
-
-- (8.0) How we handle interfaces:
- - (8.1) We do not technically care about interfaces that only consist of
- exported methods. Exported methods on concrete types are always
- marked as used.
- - Any concrete type implements all known interfaces. Even if it isn't
- assigned to any interfaces in our code, the user may receive a value
- of the type and expect to pass it back to us through an interface.
-
- Concrete types use their methods that implement interfaces. If the
- type is used, it uses those methods. Otherwise, it doesn't. This
- way, types aren't incorrectly marked reachable through the edge
- from method to type.
-
- - (8.3) All interface methods are marked as used, even if they never get
- called. This is to accommodate sum types (unexported interface
- method that must exist but never gets called.)
-
- - (8.4) All embedded interfaces are marked as used. This is an
- extension of 8.3, but we have to explicitly track embedded
- interfaces because in a chain C->B->A, B wouldn't be marked as
- used by 8.3 just because it contributes A's methods to C.
-
-- Inherent uses:
- - thunks and other generated wrappers call the real function
- - (9.2) variables use their types
- - (9.3) types use their underlying and element types
- - (9.4) conversions use the type they convert to
- - (9.5) instructions use their operands
- - (9.6) instructions use their operands' types
- - (9.7) variable _reads_ use variables, writes do not, except in tests
- - (9.8) runtime functions that may be called from user code via the compiler
-
-
-- const groups:
- (10.1) if one constant out of a block of constants is used, mark all
- of them used. a lot of the time, unused constants exist for the sake
- of completeness. See also
- https://github.com/dominikh/go-tools/issues/365
-
-
-- (11.1) anonymous struct types use all their fields. we cannot
- deduplicate struct types, as that leads to order-dependent
- reportings. we can't not deduplicate struct types while still
- tracking fields, because then each instance of the unnamed type in
- the data flow chain will get its own fields, causing false
- positives. Thus, we only accurately track fields of named struct
- types, and assume that unnamed struct types use all their fields.
-
-
-- Differences in whole program mode:
- - (e2) types aim to implement all exported interfaces from all packages
- - (e3) exported identifiers aren't automatically used. for fields and
- methods this poses extra issues due to reflection. We assume
- that all exported fields are used. We also maintain a list of
- known reflection-based method callers.
-
-*/
-
-func assert(b bool) {
- if !b {
- panic("failed assertion")
- }
-}
-
-func typString(obj types.Object) string {
- switch obj := obj.(type) {
- case *types.Func:
- return "func"
- case *types.Var:
- if obj.IsField() {
- return "field"
- }
- return "var"
- case *types.Const:
- return "const"
- case *types.TypeName:
- return "type"
- default:
- return "identifier"
- }
-}
-
-// /usr/lib/go/src/runtime/proc.go:433:6: func badmorestackg0 is unused (U1000)
-
-// Functions defined in the Go runtime that may be called through
-// compiler magic or via assembly.
-var runtimeFuncs = map[string]bool{
- // The first part of the list is copied from
- // cmd/compile/internal/gc/builtin.go, var runtimeDecls
- "newobject": true,
- "panicindex": true,
- "panicslice": true,
- "panicdivide": true,
- "panicmakeslicelen": true,
- "throwinit": true,
- "panicwrap": true,
- "gopanic": true,
- "gorecover": true,
- "goschedguarded": true,
- "printbool": true,
- "printfloat": true,
- "printint": true,
- "printhex": true,
- "printuint": true,
- "printcomplex": true,
- "printstring": true,
- "printpointer": true,
- "printiface": true,
- "printeface": true,
- "printslice": true,
- "printnl": true,
- "printsp": true,
- "printlock": true,
- "printunlock": true,
- "concatstring2": true,
- "concatstring3": true,
- "concatstring4": true,
- "concatstring5": true,
- "concatstrings": true,
- "cmpstring": true,
- "intstring": true,
- "slicebytetostring": true,
- "slicebytetostringtmp": true,
- "slicerunetostring": true,
- "stringtoslicebyte": true,
- "stringtoslicerune": true,
- "slicecopy": true,
- "slicestringcopy": true,
- "decoderune": true,
- "countrunes": true,
- "convI2I": true,
- "convT16": true,
- "convT32": true,
- "convT64": true,
- "convTstring": true,
- "convTslice": true,
- "convT2E": true,
- "convT2Enoptr": true,
- "convT2I": true,
- "convT2Inoptr": true,
- "assertE2I": true,
- "assertE2I2": true,
- "assertI2I": true,
- "assertI2I2": true,
- "panicdottypeE": true,
- "panicdottypeI": true,
- "panicnildottype": true,
- "ifaceeq": true,
- "efaceeq": true,
- "fastrand": true,
- "makemap64": true,
- "makemap": true,
- "makemap_small": true,
- "mapaccess1": true,
- "mapaccess1_fast32": true,
- "mapaccess1_fast64": true,
- "mapaccess1_faststr": true,
- "mapaccess1_fat": true,
- "mapaccess2": true,
- "mapaccess2_fast32": true,
- "mapaccess2_fast64": true,
- "mapaccess2_faststr": true,
- "mapaccess2_fat": true,
- "mapassign": true,
- "mapassign_fast32": true,
- "mapassign_fast32ptr": true,
- "mapassign_fast64": true,
- "mapassign_fast64ptr": true,
- "mapassign_faststr": true,
- "mapiterinit": true,
- "mapdelete": true,
- "mapdelete_fast32": true,
- "mapdelete_fast64": true,
- "mapdelete_faststr": true,
- "mapiternext": true,
- "mapclear": true,
- "makechan64": true,
- "makechan": true,
- "chanrecv1": true,
- "chanrecv2": true,
- "chansend1": true,
- "closechan": true,
- "writeBarrier": true,
- "typedmemmove": true,
- "typedmemclr": true,
- "typedslicecopy": true,
- "selectnbsend": true,
- "selectnbrecv": true,
- "selectnbrecv2": true,
- "selectsetpc": true,
- "selectgo": true,
- "block": true,
- "makeslice": true,
- "makeslice64": true,
- "growslice": true,
- "memmove": true,
- "memclrNoHeapPointers": true,
- "memclrHasPointers": true,
- "memequal": true,
- "memequal8": true,
- "memequal16": true,
- "memequal32": true,
- "memequal64": true,
- "memequal128": true,
- "int64div": true,
- "uint64div": true,
- "int64mod": true,
- "uint64mod": true,
- "float64toint64": true,
- "float64touint64": true,
- "float64touint32": true,
- "int64tofloat64": true,
- "uint64tofloat64": true,
- "uint32tofloat64": true,
- "complex128div": true,
- "racefuncenter": true,
- "racefuncenterfp": true,
- "racefuncexit": true,
- "raceread": true,
- "racewrite": true,
- "racereadrange": true,
- "racewriterange": true,
- "msanread": true,
- "msanwrite": true,
- "x86HasPOPCNT": true,
- "x86HasSSE41": true,
- "arm64HasATOMICS": true,
-
- // The second part of the list is extracted from assembly code in
- // the standard library, with the exception of the runtime package itself
- "abort": true,
- "aeshashbody": true,
- "args": true,
- "asminit": true,
- "badctxt": true,
- "badmcall2": true,
- "badmcall": true,
- "badmorestackg0": true,
- "badmorestackgsignal": true,
- "badsignal2": true,
- "callbackasm1": true,
- "callCfunction": true,
- "cgocallback_gofunc": true,
- "cgocallbackg": true,
- "checkgoarm": true,
- "check": true,
- "debugCallCheck": true,
- "debugCallWrap": true,
- "emptyfunc": true,
- "entersyscall": true,
- "exit": true,
- "exits": true,
- "exitsyscall": true,
- "externalthreadhandler": true,
- "findnull": true,
- "goexit1": true,
- "gostring": true,
- "i386_set_ldt": true,
- "_initcgo": true,
- "init_thread_tls": true,
- "ldt0setup": true,
- "libpreinit": true,
- "load_g": true,
- "morestack": true,
- "mstart": true,
- "nacl_sysinfo": true,
- "nanotimeQPC": true,
- "nanotime": true,
- "newosproc0": true,
- "newproc": true,
- "newstack": true,
- "noted": true,
- "nowQPC": true,
- "osinit": true,
- "printf": true,
- "racecallback": true,
- "reflectcallmove": true,
- "reginit": true,
- "rt0_go": true,
- "save_g": true,
- "schedinit": true,
- "setldt": true,
- "settls": true,
- "sighandler": true,
- "sigprofNonGo": true,
- "sigtrampgo": true,
- "_sigtramp": true,
- "sigtramp": true,
- "stackcheck": true,
- "syscall_chdir": true,
- "syscall_chroot": true,
- "syscall_close": true,
- "syscall_dup2": true,
- "syscall_execve": true,
- "syscall_exit": true,
- "syscall_fcntl": true,
- "syscall_forkx": true,
- "syscall_gethostname": true,
- "syscall_getpid": true,
- "syscall_ioctl": true,
- "syscall_pipe": true,
- "syscall_rawsyscall6": true,
- "syscall_rawSyscall6": true,
- "syscall_rawsyscall": true,
- "syscall_RawSyscall": true,
- "syscall_rawsysvicall6": true,
- "syscall_setgid": true,
- "syscall_setgroups": true,
- "syscall_setpgid": true,
- "syscall_setsid": true,
- "syscall_setuid": true,
- "syscall_syscall6": true,
- "syscall_syscall": true,
- "syscall_Syscall": true,
- "syscall_sysvicall6": true,
- "syscall_wait4": true,
- "syscall_write": true,
- "traceback": true,
- "tstart": true,
- "usplitR0": true,
- "wbBufFlush": true,
- "write": true,
-}
-
-type pkg struct {
- Fset *token.FileSet
- Files []*ast.File
- Pkg *types.Package
- TypesInfo *types.Info
- TypesSizes types.Sizes
- IR *ir.Package
- SrcFuncs []*ir.Function
-}
-
-type Checker struct {
- WholeProgram bool
- Debug io.Writer
-
- mu sync.Mutex
- initialPackages map[*types.Package]struct{}
- allPackages map[*types.Package]struct{}
- graph *Graph
-}
-
-func NewChecker(wholeProgram bool) *Checker {
- return &Checker{
- initialPackages: map[*types.Package]struct{}{},
- allPackages: map[*types.Package]struct{}{},
- WholeProgram: wholeProgram,
- }
-}
-
-func (c *Checker) Analyzer() *analysis.Analyzer {
- name := "U1000"
- if c.WholeProgram {
- name = "U1001"
- }
- return &analysis.Analyzer{
- Name: name,
- Doc: "Unused code",
- Run: c.Run,
- Requires: []*analysis.Analyzer{buildir.Analyzer},
- }
-}
-
-func (c *Checker) Run(pass *analysis.Pass) (interface{}, error) {
- c.mu.Lock()
- if c.graph == nil {
- c.graph = NewGraph()
- c.graph.wholeProgram = c.WholeProgram
- c.graph.fset = pass.Fset
- }
-
- var visit func(pkg *types.Package)
- visit = func(pkg *types.Package) {
- if _, ok := c.allPackages[pkg]; ok {
- return
- }
- c.allPackages[pkg] = struct{}{}
- for _, imp := range pkg.Imports() {
- visit(imp)
- }
- }
- visit(pass.Pkg)
-
- c.initialPackages[pass.Pkg] = struct{}{}
- c.mu.Unlock()
-
- irpkg := pass.ResultOf[buildir.Analyzer].(*buildir.IR)
- pkg := &pkg{
- Fset: pass.Fset,
- Files: pass.Files,
- Pkg: pass.Pkg,
- TypesInfo: pass.TypesInfo,
- TypesSizes: pass.TypesSizes,
- IR: irpkg.Pkg,
- SrcFuncs: irpkg.SrcFuncs,
- }
-
- c.processPkg(c.graph, pkg)
-
- return nil, nil
-}
-
-func (c *Checker) ProblemObject(fset *token.FileSet, obj types.Object) lint.Problem {
- name := obj.Name()
- if sig, ok := obj.Type().(*types.Signature); ok && sig.Recv() != nil {
- switch sig.Recv().Type().(type) {
- case *types.Named, *types.Pointer:
- typ := types.TypeString(sig.Recv().Type(), func(*types.Package) string { return "" })
- if len(typ) > 0 && typ[0] == '*' {
- name = fmt.Sprintf("(%s).%s", typ, obj.Name())
- } else if len(typ) > 0 {
- name = fmt.Sprintf("%s.%s", typ, obj.Name())
- }
- }
- }
-
- checkName := "U1000"
- if c.WholeProgram {
- checkName = "U1001"
- }
- return lint.Problem{
- Pos: lint.DisplayPosition(fset, obj.Pos()),
- Message: fmt.Sprintf("%s %s is unused", typString(obj), name),
- Check: checkName,
- }
-}
-
-func (c *Checker) Result() []types.Object {
- out := c.results()
-
- out2 := make([]types.Object, 0, len(out))
- for _, v := range out {
- if _, ok := c.initialPackages[v.Pkg()]; !ok {
- continue
- }
- out2 = append(out2, v)
- }
-
- return out2
-}
-
-func (c *Checker) debugf(f string, v ...interface{}) {
- if c.Debug != nil {
- fmt.Fprintf(c.Debug, f, v...)
- }
-}
-
-func (graph *Graph) quieten(node *Node) {
- if node.seen {
- return
- }
- switch obj := node.obj.(type) {
- case *types.Named:
- for i := 0; i < obj.NumMethods(); i++ {
- m := obj.Method(i)
- if node, ok := graph.nodeMaybe(m); ok {
- node.quiet = true
- }
- }
- case *types.Struct:
- for i := 0; i < obj.NumFields(); i++ {
- if node, ok := graph.nodeMaybe(obj.Field(i)); ok {
- node.quiet = true
- }
- }
- case *types.Interface:
- for i := 0; i < obj.NumExplicitMethods(); i++ {
- m := obj.ExplicitMethod(i)
- if node, ok := graph.nodeMaybe(m); ok {
- node.quiet = true
- }
- }
- }
-}
-
-func (c *Checker) results() []types.Object {
- if c.graph == nil {
- // We never analyzed any packages
- return nil
- }
-
- var out []types.Object
-
- if c.WholeProgram {
- var ifaces []*types.Interface
- var notIfaces []types.Type
-
- // implement as many interfaces as possible
- c.graph.seenTypes.Iterate(func(t types.Type, _ interface{}) {
- switch t := t.(type) {
- case *types.Interface:
- if t.NumMethods() > 0 {
- ifaces = append(ifaces, t)
- }
- default:
- if _, ok := t.Underlying().(*types.Interface); !ok {
- notIfaces = append(notIfaces, t)
- }
- }
- })
-
- for pkg := range c.allPackages {
- for _, iface := range interfacesFromExportData(pkg) {
- if iface.NumMethods() > 0 {
- ifaces = append(ifaces, iface)
- }
- }
- }
-
- ctx := &context{
- g: c.graph,
- seenTypes: &c.graph.seenTypes,
- }
- // (8.0) handle interfaces
- // (e2) types aim to implement all exported interfaces from all packages
- for _, t := range notIfaces {
- // OPT(dh): it is unfortunate that we do not have access
- // to a populated method set at this point.
- ms := types.NewMethodSet(t)
- for _, iface := range ifaces {
- if sels, ok := c.graph.implements(t, iface, ms); ok {
- for _, sel := range sels {
- c.graph.useMethod(ctx, t, sel, t, edgeImplements)
- }
- }
- }
- }
- }
-
- if c.Debug != nil {
- debugNode := func(node *Node) {
- if node.obj == nil {
- c.debugf("n%d [label=\"Root\"];\n", node.id)
- } else {
- c.debugf("n%d [label=%q];\n", node.id, fmt.Sprintf("(%T) %s", node.obj, node.obj))
- }
- for _, e := range node.used {
- for i := edgeKind(1); i < 64; i++ {
- if e.kind.is(1 << i) {
- c.debugf("n%d -> n%d [label=%q];\n", node.id, e.node.id, edgeKind(1< 1 {
- cg := &ConstGroup{}
- ctx.see(cg)
- for _, spec := range specs {
- for _, name := range spec.(*ast.ValueSpec).Names {
- obj := pkg.TypesInfo.ObjectOf(name)
- // (10.1) const groups
- ctx.seeAndUse(obj, cg, edgeConstGroup)
- ctx.use(cg, obj, edgeConstGroup)
- }
- }
- }
- }
- case token.VAR:
- for _, spec := range n.Specs {
- v := spec.(*ast.ValueSpec)
- for _, name := range v.Names {
- T := pkg.TypesInfo.TypeOf(name)
- if fn != nil {
- ctx.seeAndUse(T, fn, edgeVarDecl)
- } else {
- // TODO(dh): we likely want to make
- // the type used by the variable, not
- // the package containing the
- // variable. But then we have to take
- // special care of blank identifiers.
- ctx.seeAndUse(T, nil, edgeVarDecl)
- }
- g.typ(ctx, T, nil)
- }
- }
- case token.TYPE:
- for _, spec := range n.Specs {
- // go/types doesn't provide a way to go from a
- // types.Named to the named type it was based on
- // (the t1 in type t2 t1). Therefore we walk the
- // AST and process GenDecls.
- //
- // (2.2) named types use the type they're based on
- v := spec.(*ast.TypeSpec)
- T := pkg.TypesInfo.TypeOf(v.Type)
- obj := pkg.TypesInfo.ObjectOf(v.Name)
- ctx.see(obj)
- ctx.see(T)
- ctx.use(T, obj, edgeType)
- g.typ(ctx, obj.Type(), nil)
- g.typ(ctx, T, nil)
-
- if v.Assign != 0 {
- aliasFor := obj.(*types.TypeName).Type()
- // (2.3) named types use all their aliases. we can't easily track uses of aliases
- if isIrrelevant(aliasFor) {
- // We do not track the type this is an
- // alias for (for example builtins), so
- // just mark the alias used.
- //
- // FIXME(dh): what about aliases declared inside functions?
- ctx.use(obj, nil, edgeAlias)
- } else {
- ctx.see(aliasFor)
- ctx.seeAndUse(obj, aliasFor, edgeAlias)
- }
- }
- }
- }
- }
- return true
- })
- }
-
- for _, m := range pkg.IR.Members {
- switch m := m.(type) {
- case *ir.NamedConst:
- // nothing to do, we collect all constants from Defs
- case *ir.Global:
- if m.Object() != nil {
- ctx.see(m.Object())
- if g.trackExportedIdentifier(ctx, m.Object()) {
- // (1.3) packages use exported variables (unless in package main)
- ctx.use(m.Object(), nil, edgeExportedVariable)
- }
- }
- case *ir.Function:
- mObj := owningObject(m)
- if mObj != nil {
- ctx.see(mObj)
- }
- //lint:ignore SA9003 handled implicitly
- if m.Name() == "init" {
- // (1.5) packages use init functions
- //
- // This is handled implicitly. The generated init
- // function has no object, thus everything in it will
- // be owned by the package.
- }
- // This branch catches top-level functions, not methods.
- if m.Object() != nil && g.trackExportedIdentifier(ctx, m.Object()) {
- // (1.2) packages use exported functions (unless in package main)
- ctx.use(mObj, nil, edgeExportedFunction)
- }
- if m.Name() == "main" && pkg.Pkg.Name() == "main" {
- // (1.7) packages use the main function iff in the main package
- ctx.use(mObj, nil, edgeMainFunction)
- }
- if pkg.Pkg.Path() == "runtime" && runtimeFuncs[m.Name()] {
- // (9.8) runtime functions that may be called from user code via the compiler
- ctx.use(mObj, nil, edgeRuntimeFunction)
- }
- if m.Source() != nil {
- doc := m.Source().(*ast.FuncDecl).Doc
- if doc != nil {
- for _, cmt := range doc.List {
- if strings.HasPrefix(cmt.Text, "//go:cgo_export_") {
- // (1.6) packages use functions exported to cgo
- ctx.use(mObj, nil, edgeCgoExported)
- }
- }
- }
- }
- g.function(ctx, m)
- case *ir.Type:
- if m.Object() != nil {
- ctx.see(m.Object())
- if g.trackExportedIdentifier(ctx, m.Object()) {
- // (1.1) packages use exported named types (unless in package main)
- ctx.use(m.Object(), nil, edgeExportedType)
- }
- }
- g.typ(ctx, m.Type(), nil)
- default:
- panic(fmt.Sprintf("unreachable: %T", m))
- }
- }
-
- if !g.wholeProgram {
- // When not in whole program mode we reset seenTypes after each package,
- // which means g.seenTypes only contains types of
- // interest to us. In whole program mode, we're better off
- // processing all interfaces at once, globally, both for
- // performance reasons and because in whole program mode we
- // actually care about all interfaces, not just the subset
- // that has unexported methods.
-
- var ifaces []*types.Interface
- var notIfaces []types.Type
-
- ctx.seenTypes.Iterate(func(t types.Type, _ interface{}) {
- switch t := t.(type) {
- case *types.Interface:
- // OPT(dh): (8.1) we only need interfaces that have unexported methods
- ifaces = append(ifaces, t)
- default:
- if _, ok := t.Underlying().(*types.Interface); !ok {
- notIfaces = append(notIfaces, t)
- }
- }
- })
-
- // (8.0) handle interfaces
- for _, t := range notIfaces {
- ms := pkg.IR.Prog.MethodSets.MethodSet(t)
- for _, iface := range ifaces {
- if sels, ok := g.implements(t, iface, ms); ok {
- for _, sel := range sels {
- g.useMethod(ctx, t, sel, t, edgeImplements)
- }
- }
- }
- }
- }
-}
-
-func (g *Graph) useMethod(ctx *context, t types.Type, sel *types.Selection, by interface{}, kind edgeKind) {
- obj := sel.Obj()
- path := sel.Index()
- assert(obj != nil)
- if len(path) > 1 {
- base := code.Dereference(t).Underlying().(*types.Struct)
- for _, idx := range path[:len(path)-1] {
- next := base.Field(idx)
- // (6.3) structs use embedded fields that help implement interfaces
- ctx.see(base)
- ctx.seeAndUse(next, base, edgeProvidesMethod)
- base, _ = code.Dereference(next.Type()).Underlying().(*types.Struct)
- }
- }
- ctx.seeAndUse(obj, by, kind)
-}
-
-func owningObject(fn *ir.Function) types.Object {
- if fn.Object() != nil {
- return fn.Object()
- }
- if fn.Parent() != nil {
- return owningObject(fn.Parent())
- }
- return nil
-}
-
-func (g *Graph) function(ctx *context, fn *ir.Function) {
- if fn.Package() != nil && fn.Package() != ctx.pkg.IR {
- return
- }
-
- name := fn.RelString(nil)
- if _, ok := ctx.seenFns[name]; ok {
- return
- }
- ctx.seenFns[name] = struct{}{}
-
- // (4.1) functions use all their arguments, return parameters and receivers
- g.signature(ctx, fn.Signature, owningObject(fn))
- g.instructions(ctx, fn)
- for _, anon := range fn.AnonFuncs {
- // (4.2) functions use anonymous functions defined beneath them
- //
- // This fact is expressed implicitly. Anonymous functions have
- // no types.Object, so their owner is the surrounding
- // function.
- g.function(ctx, anon)
- }
-}
-
-func (g *Graph) typ(ctx *context, t types.Type, parent types.Type) {
- if g.wholeProgram {
- g.mu.Lock()
- }
- if ctx.seenTypes.At(t) != nil {
- if g.wholeProgram {
- g.mu.Unlock()
- }
- return
- }
- if g.wholeProgram {
- g.mu.Unlock()
- }
- if t, ok := t.(*types.Named); ok && t.Obj().Pkg() != nil {
- if t.Obj().Pkg() != ctx.pkg.Pkg {
- return
- }
- }
-
- if g.wholeProgram {
- g.mu.Lock()
- }
- ctx.seenTypes.Set(t, struct{}{})
- if g.wholeProgram {
- g.mu.Unlock()
- }
- if isIrrelevant(t) {
- return
- }
-
- ctx.see(t)
- switch t := t.(type) {
- case *types.Struct:
- for i := 0; i < t.NumFields(); i++ {
- ctx.see(t.Field(i))
- if t.Field(i).Exported() {
- // (6.2) structs use exported fields
- ctx.use(t.Field(i), t, edgeExportedField)
- } else if t.Field(i).Name() == "_" {
- ctx.use(t.Field(i), t, edgeBlankField)
- } else if isNoCopyType(t.Field(i).Type()) {
- // (6.1) structs use fields of type NoCopy sentinel
- ctx.use(t.Field(i), t, edgeNoCopySentinel)
- } else if parent == nil {
- // (11.1) anonymous struct types use all their fields.
- ctx.use(t.Field(i), t, edgeAnonymousStruct)
- }
- if t.Field(i).Anonymous() {
- // (e3) exported identifiers aren't automatically used.
- if !g.wholeProgram {
- // does the embedded field contribute exported methods to the method set?
- T := t.Field(i).Type()
- if _, ok := T.Underlying().(*types.Pointer); !ok {
- // An embedded field is addressable, so check
- // the pointer type to get the full method set
- T = types.NewPointer(T)
- }
- ms := ctx.pkg.IR.Prog.MethodSets.MethodSet(T)
- for j := 0; j < ms.Len(); j++ {
- if ms.At(j).Obj().Exported() {
- // (6.4) structs use embedded fields that have exported methods (recursively)
- ctx.use(t.Field(i), t, edgeExtendsExportedMethodSet)
- break
- }
- }
- }
-
- seen := map[*types.Struct]struct{}{}
- var hasExportedField func(t types.Type) bool
- hasExportedField = func(T types.Type) bool {
- t, ok := code.Dereference(T).Underlying().(*types.Struct)
- if !ok {
- return false
- }
- if _, ok := seen[t]; ok {
- return false
- }
- seen[t] = struct{}{}
- for i := 0; i < t.NumFields(); i++ {
- field := t.Field(i)
- if field.Exported() {
- return true
- }
- if field.Embedded() && hasExportedField(field.Type()) {
- return true
- }
- }
- return false
- }
- // does the embedded field contribute exported fields?
- if hasExportedField(t.Field(i).Type()) {
- // (6.5) structs use embedded structs that have exported fields (recursively)
- ctx.use(t.Field(i), t, edgeExtendsExportedFields)
- }
-
- }
- g.variable(ctx, t.Field(i))
- }
- case *types.Basic:
- // Nothing to do
- case *types.Named:
- // (9.3) types use their underlying and element types
- ctx.seeAndUse(t.Underlying(), t, edgeUnderlyingType)
- ctx.seeAndUse(t.Obj(), t, edgeTypeName)
- ctx.seeAndUse(t, t.Obj(), edgeNamedType)
-
- // (2.4) named types use the pointer type
- if _, ok := t.Underlying().(*types.Interface); !ok && t.NumMethods() > 0 {
- ctx.seeAndUse(types.NewPointer(t), t, edgePointerType)
- }
-
- for i := 0; i < t.NumMethods(); i++ {
- ctx.see(t.Method(i))
- // don't use trackExportedIdentifier here, we care about
- // all exported methods, even in package main or in tests.
- if t.Method(i).Exported() && !g.wholeProgram {
- // (2.1) named types use exported methods
- ctx.use(t.Method(i), t, edgeExportedMethod)
- }
- g.function(ctx, ctx.pkg.IR.Prog.FuncValue(t.Method(i)))
- }
-
- g.typ(ctx, t.Underlying(), t)
- case *types.Slice:
- // (9.3) types use their underlying and element types
- ctx.seeAndUse(t.Elem(), t, edgeElementType)
- g.typ(ctx, t.Elem(), nil)
- case *types.Map:
- // (9.3) types use their underlying and element types
- ctx.seeAndUse(t.Elem(), t, edgeElementType)
- // (9.3) types use their underlying and element types
- ctx.seeAndUse(t.Key(), t, edgeKeyType)
- g.typ(ctx, t.Elem(), nil)
- g.typ(ctx, t.Key(), nil)
- case *types.Signature:
- g.signature(ctx, t, nil)
- case *types.Interface:
- for i := 0; i < t.NumMethods(); i++ {
- m := t.Method(i)
- // (8.3) All interface methods are marked as used
- ctx.seeAndUse(m, t, edgeInterfaceMethod)
- ctx.seeAndUse(m.Type().(*types.Signature), m, edgeSignature)
- g.signature(ctx, m.Type().(*types.Signature), nil)
- }
- for i := 0; i < t.NumEmbeddeds(); i++ {
- tt := t.EmbeddedType(i)
- // (8.4) All embedded interfaces are marked as used
- ctx.seeAndUse(tt, t, edgeEmbeddedInterface)
- }
- case *types.Array:
- // (9.3) types use their underlying and element types
- ctx.seeAndUse(t.Elem(), t, edgeElementType)
- g.typ(ctx, t.Elem(), nil)
- case *types.Pointer:
- // (9.3) types use their underlying and element types
- ctx.seeAndUse(t.Elem(), t, edgeElementType)
- g.typ(ctx, t.Elem(), nil)
- case *types.Chan:
- // (9.3) types use their underlying and element types
- ctx.seeAndUse(t.Elem(), t, edgeElementType)
- g.typ(ctx, t.Elem(), nil)
- case *types.Tuple:
- for i := 0; i < t.Len(); i++ {
- // (9.3) types use their underlying and element types
- ctx.seeAndUse(t.At(i).Type(), t, edgeTupleElement|edgeType)
- g.typ(ctx, t.At(i).Type(), nil)
- }
- default:
- panic(fmt.Sprintf("unreachable: %T", t))
- }
-}
-
-func (g *Graph) variable(ctx *context, v *types.Var) {
- // (9.2) variables use their types
- ctx.seeAndUse(v.Type(), v, edgeType)
- g.typ(ctx, v.Type(), nil)
-}
-
-func (g *Graph) signature(ctx *context, sig *types.Signature, fn types.Object) {
- var user interface{} = fn
- if fn == nil {
- user = sig
- ctx.see(sig)
- }
- if sig.Recv() != nil {
- ctx.seeAndUse(sig.Recv().Type(), user, edgeReceiver|edgeType)
- g.typ(ctx, sig.Recv().Type(), nil)
- }
- for i := 0; i < sig.Params().Len(); i++ {
- param := sig.Params().At(i)
- ctx.seeAndUse(param.Type(), user, edgeFunctionArgument|edgeType)
- g.typ(ctx, param.Type(), nil)
- }
- for i := 0; i < sig.Results().Len(); i++ {
- param := sig.Results().At(i)
- ctx.seeAndUse(param.Type(), user, edgeFunctionResult|edgeType)
- g.typ(ctx, param.Type(), nil)
- }
-}
-
-func (g *Graph) instructions(ctx *context, fn *ir.Function) {
- fnObj := owningObject(fn)
- for _, b := range fn.Blocks {
- for _, instr := range b.Instrs {
- ops := instr.Operands(nil)
- switch instr.(type) {
- case *ir.Store:
- // (9.7) variable _reads_ use variables, writes do not
- ops = ops[1:]
- case *ir.DebugRef:
- ops = nil
- }
- for _, arg := range ops {
- walkPhi(*arg, func(v ir.Value) {
- switch v := v.(type) {
- case *ir.Function:
- // (4.3) functions use closures and bound methods.
- // (4.5) functions use functions they call
- // (9.5) instructions use their operands
- // (4.4) functions use functions they return. we assume that someone else will call the returned function
- if owningObject(v) != nil {
- ctx.seeAndUse(owningObject(v), fnObj, edgeInstructionOperand)
- }
- g.function(ctx, v)
- case *ir.Const:
- // (9.6) instructions use their operands' types
- ctx.seeAndUse(v.Type(), fnObj, edgeType)
- g.typ(ctx, v.Type(), nil)
- case *ir.Global:
- if v.Object() != nil {
- // (9.5) instructions use their operands
- ctx.seeAndUse(v.Object(), fnObj, edgeInstructionOperand)
- }
- }
- })
- }
- if v, ok := instr.(ir.Value); ok {
- if _, ok := v.(*ir.Range); !ok {
- // See https://github.com/golang/go/issues/19670
-
- // (4.8) instructions use their types
- // (9.4) conversions use the type they convert to
- ctx.seeAndUse(v.Type(), fnObj, edgeType)
- g.typ(ctx, v.Type(), nil)
- }
- }
- switch instr := instr.(type) {
- case *ir.Field:
- st := instr.X.Type().Underlying().(*types.Struct)
- field := st.Field(instr.Field)
- // (4.7) functions use fields they access
- ctx.seeAndUse(field, fnObj, edgeFieldAccess)
- case *ir.FieldAddr:
- st := code.Dereference(instr.X.Type()).Underlying().(*types.Struct)
- field := st.Field(instr.Field)
- // (4.7) functions use fields they access
- ctx.seeAndUse(field, fnObj, edgeFieldAccess)
- case *ir.Store:
- // nothing to do, handled generically by operands
- case *ir.Call:
- c := instr.Common()
- if !c.IsInvoke() {
- // handled generically as an instruction operand
-
- if g.wholeProgram {
- // (e3) special case known reflection-based method callers
- switch code.CallName(c) {
- case "net/rpc.Register", "net/rpc.RegisterName", "(*net/rpc.Server).Register", "(*net/rpc.Server).RegisterName":
- var arg ir.Value
- switch code.CallName(c) {
- case "net/rpc.Register":
- arg = c.Args[0]
- case "net/rpc.RegisterName":
- arg = c.Args[1]
- case "(*net/rpc.Server).Register":
- arg = c.Args[1]
- case "(*net/rpc.Server).RegisterName":
- arg = c.Args[2]
- }
- walkPhi(arg, func(v ir.Value) {
- if v, ok := v.(*ir.MakeInterface); ok {
- walkPhi(v.X, func(vv ir.Value) {
- ms := ctx.pkg.IR.Prog.MethodSets.MethodSet(vv.Type())
- for i := 0; i < ms.Len(); i++ {
- if ms.At(i).Obj().Exported() {
- g.useMethod(ctx, vv.Type(), ms.At(i), fnObj, edgeNetRPCRegister)
- }
- }
- })
- }
- })
- }
- }
- } else {
- // (4.5) functions use functions/interface methods they call
- ctx.seeAndUse(c.Method, fnObj, edgeInterfaceCall)
- }
- case *ir.Return:
- // nothing to do, handled generically by operands
- case *ir.ChangeType:
- // conversion type handled generically
-
- s1, ok1 := code.Dereference(instr.Type()).Underlying().(*types.Struct)
- s2, ok2 := code.Dereference(instr.X.Type()).Underlying().(*types.Struct)
- if ok1 && ok2 {
- // Converting between two structs. The fields are
- // relevant for the conversion, but only if the
- // fields are also used outside of the conversion.
- // Mark fields as used by each other.
-
- assert(s1.NumFields() == s2.NumFields())
- for i := 0; i < s1.NumFields(); i++ {
- ctx.see(s1.Field(i))
- ctx.see(s2.Field(i))
- // (5.1) when converting between two equivalent structs, the fields in
- // either struct use each other. the fields are relevant for the
- // conversion, but only if the fields are also accessed outside the
- // conversion.
- ctx.seeAndUse(s1.Field(i), s2.Field(i), edgeStructConversion)
- ctx.seeAndUse(s2.Field(i), s1.Field(i), edgeStructConversion)
- }
- }
- case *ir.MakeInterface:
- // nothing to do, handled generically by operands
- case *ir.Slice:
- // nothing to do, handled generically by operands
- case *ir.RunDefers:
- // nothing to do, the deferred functions are already marked use by defering them.
- case *ir.Convert:
- // to unsafe.Pointer
- if typ, ok := instr.Type().(*types.Basic); ok && typ.Kind() == types.UnsafePointer {
- if ptr, ok := instr.X.Type().Underlying().(*types.Pointer); ok {
- if st, ok := ptr.Elem().Underlying().(*types.Struct); ok {
- for i := 0; i < st.NumFields(); i++ {
- // (5.2) when converting to or from unsafe.Pointer, mark all fields as used.
- ctx.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion)
- }
- }
- }
- }
- // from unsafe.Pointer
- if typ, ok := instr.X.Type().(*types.Basic); ok && typ.Kind() == types.UnsafePointer {
- if ptr, ok := instr.Type().Underlying().(*types.Pointer); ok {
- if st, ok := ptr.Elem().Underlying().(*types.Struct); ok {
- for i := 0; i < st.NumFields(); i++ {
- // (5.2) when converting to or from unsafe.Pointer, mark all fields as used.
- ctx.seeAndUse(st.Field(i), fnObj, edgeUnsafeConversion)
- }
- }
- }
- }
- case *ir.TypeAssert:
- // nothing to do, handled generically by instruction
- // type (possibly a tuple, which contains the asserted
- // to type). redundantly handled by the type of
- // ir.Extract, too
- case *ir.MakeClosure:
- // nothing to do, handled generically by operands
- case *ir.Alloc:
- // nothing to do
- case *ir.UnOp:
- // nothing to do
- case *ir.BinOp:
- // nothing to do
- case *ir.If:
- // nothing to do
- case *ir.Jump:
- // nothing to do
- case *ir.Unreachable:
- // nothing to do
- case *ir.IndexAddr:
- // nothing to do
- case *ir.Extract:
- // nothing to do
- case *ir.Panic:
- // nothing to do
- case *ir.DebugRef:
- // nothing to do
- case *ir.BlankStore:
- // nothing to do
- case *ir.Phi:
- // nothing to do
- case *ir.Sigma:
- // nothing to do
- case *ir.MakeMap:
- // nothing to do
- case *ir.MapUpdate:
- // nothing to do
- case *ir.MapLookup:
- // nothing to do
- case *ir.StringLookup:
- // nothing to do
- case *ir.MakeSlice:
- // nothing to do
- case *ir.Send:
- // nothing to do
- case *ir.MakeChan:
- // nothing to do
- case *ir.Range:
- // nothing to do
- case *ir.Next:
- // nothing to do
- case *ir.Index:
- // nothing to do
- case *ir.Select:
- // nothing to do
- case *ir.ChangeInterface:
- // nothing to do
- case *ir.Load:
- // nothing to do
- case *ir.Go:
- // nothing to do
- case *ir.Defer:
- // nothing to do
- case *ir.Parameter:
- // nothing to do
- case *ir.Const:
- // nothing to do
- case *ir.Recv:
- // nothing to do
- case *ir.TypeSwitch:
- // nothing to do
- case *ir.ConstantSwitch:
- // nothing to do
- default:
- panic(fmt.Sprintf("unreachable: %T", instr))
- }
- }
- }
-}
-
-// isNoCopyType reports whether a type represents the NoCopy sentinel
-// type. The NoCopy type is a named struct with no fields and exactly
-// one method `func Lock()` that is empty.
-//
-// FIXME(dh): currently we're not checking that the function body is
-// empty.
-func isNoCopyType(typ types.Type) bool {
- st, ok := typ.Underlying().(*types.Struct)
- if !ok {
- return false
- }
- if st.NumFields() != 0 {
- return false
- }
-
- named, ok := typ.(*types.Named)
- if !ok {
- return false
- }
- if named.NumMethods() != 1 {
- return false
- }
- meth := named.Method(0)
- if meth.Name() != "Lock" {
- return false
- }
- sig := meth.Type().(*types.Signature)
- if sig.Params().Len() != 0 || sig.Results().Len() != 0 {
- return false
- }
- return true
-}
-
-func walkPhi(v ir.Value, fn func(v ir.Value)) {
- phi, ok := v.(*ir.Phi)
- if !ok {
- fn(v)
- return
- }
-
- seen := map[ir.Value]struct{}{}
- var impl func(v *ir.Phi)
- impl = func(v *ir.Phi) {
- if _, ok := seen[v]; ok {
- return
- }
- seen[v] = struct{}{}
- for _, e := range v.Edges {
- if ev, ok := e.(*ir.Phi); ok {
- impl(ev)
- } else {
- fn(e)
- }
- }
- }
- impl(phi)
-}
-
-func interfacesFromExportData(pkg *types.Package) []*types.Interface {
- var out []*types.Interface
- scope := pkg.Scope()
- for _, name := range scope.Names() {
- obj := scope.Lookup(name)
- out = append(out, interfacesFromObject(obj)...)
- }
- return out
-}
-
-func interfacesFromObject(obj types.Object) []*types.Interface {
- var out []*types.Interface
- switch obj := obj.(type) {
- case *types.Func:
- sig := obj.Type().(*types.Signature)
- for i := 0; i < sig.Results().Len(); i++ {
- out = append(out, interfacesFromObject(sig.Results().At(i))...)
- }
- for i := 0; i < sig.Params().Len(); i++ {
- out = append(out, interfacesFromObject(sig.Params().At(i))...)
- }
- case *types.TypeName:
- if named, ok := obj.Type().(*types.Named); ok {
- for i := 0; i < named.NumMethods(); i++ {
- out = append(out, interfacesFromObject(named.Method(i))...)
- }
-
- if iface, ok := named.Underlying().(*types.Interface); ok {
- out = append(out, iface)
- }
- }
- case *types.Var:
- // No call to Underlying here. We want unnamed interfaces
- // only. Named interfaces are gotten directly from the
- // package's scope.
- if iface, ok := obj.Type().(*types.Interface); ok {
- out = append(out, iface)
- }
- case *types.Const:
- case *types.Builtin:
- default:
- panic(fmt.Sprintf("unhandled type: %T", obj))
- }
- return out
-}
diff --git a/vendor/honnef.co/go/tools/version/buildinfo.go b/vendor/honnef.co/go/tools/version/buildinfo.go
deleted file mode 100644
index b6034bb7dcd..00000000000
--- a/vendor/honnef.co/go/tools/version/buildinfo.go
+++ /dev/null
@@ -1,46 +0,0 @@
-// +build go1.12
-
-package version
-
-import (
- "fmt"
- "runtime/debug"
-)
-
-func printBuildInfo() {
- if info, ok := debug.ReadBuildInfo(); ok {
- fmt.Println("Main module:")
- printModule(&info.Main)
- fmt.Println("Dependencies:")
- for _, dep := range info.Deps {
- printModule(dep)
- }
- } else {
- fmt.Println("Built without Go modules")
- }
-}
-
-func buildInfoVersion() (string, bool) {
- info, ok := debug.ReadBuildInfo()
- if !ok {
- return "", false
- }
- if info.Main.Version == "(devel)" {
- return "", false
- }
- return info.Main.Version, true
-}
-
-func printModule(m *debug.Module) {
- fmt.Printf("\t%s", m.Path)
- if m.Version != "(devel)" {
- fmt.Printf("@%s", m.Version)
- }
- if m.Sum != "" {
- fmt.Printf(" (sum: %s)", m.Sum)
- }
- if m.Replace != nil {
- fmt.Printf(" (replace: %s)", m.Replace.Path)
- }
- fmt.Println()
-}
diff --git a/vendor/honnef.co/go/tools/version/buildinfo111.go b/vendor/honnef.co/go/tools/version/buildinfo111.go
deleted file mode 100644
index 06aae1e65bb..00000000000
--- a/vendor/honnef.co/go/tools/version/buildinfo111.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build !go1.12
-
-package version
-
-func printBuildInfo() {}
-func buildInfoVersion() (string, bool) { return "", false }
diff --git a/vendor/honnef.co/go/tools/version/version.go b/vendor/honnef.co/go/tools/version/version.go
deleted file mode 100644
index eed7b0def99..00000000000
--- a/vendor/honnef.co/go/tools/version/version.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package version
-
-import (
- "fmt"
- "os"
- "path/filepath"
- "runtime"
-)
-
-const Version = "2020.1.4"
-
-// version returns a version descriptor and reports whether the
-// version is a known release.
-func version() (string, bool) {
- if Version != "devel" {
- return Version, true
- }
- v, ok := buildInfoVersion()
- if ok {
- return v, false
- }
- return "devel", false
-}
-
-func Print() {
- v, release := version()
-
- if release {
- fmt.Printf("%s %s\n", filepath.Base(os.Args[0]), v)
- } else if v == "devel" {
- fmt.Printf("%s (no version)\n", filepath.Base(os.Args[0]))
- } else {
- fmt.Printf("%s (devel, %s)\n", filepath.Base(os.Args[0]), v)
- }
-}
-
-func Verbose() {
- Print()
- fmt.Println()
- fmt.Println("Compiled with Go version:", runtime.Version())
- printBuildInfo()
-}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
new file mode 100644
index 00000000000..5893df5bd26
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package httpstream adds multiplexed streaming support to HTTP requests and
+// responses via connection upgrades.
+package httpstream // import "k8s.io/apimachinery/pkg/util/httpstream"
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
new file mode 100644
index 00000000000..50d9a366f36
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/httpstream.go
@@ -0,0 +1,149 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package httpstream
+
+import (
+ "fmt"
+ "io"
+ "net/http"
+ "strings"
+ "time"
+)
+
+const (
+ HeaderConnection = "Connection"
+ HeaderUpgrade = "Upgrade"
+ HeaderProtocolVersion = "X-Stream-Protocol-Version"
+ HeaderAcceptedProtocolVersions = "X-Accepted-Stream-Protocol-Versions"
+)
+
+// NewStreamHandler defines a function that is called when a new Stream is
+// received. If no error is returned, the Stream is accepted; otherwise,
+// the stream is rejected. After the reply frame has been sent, replySent is closed.
+type NewStreamHandler func(stream Stream, replySent <-chan struct{}) error
+
+// NoOpNewStreamHandler is a stream handler that accepts a new stream and
+// performs no other logic.
+func NoOpNewStreamHandler(stream Stream, replySent <-chan struct{}) error { return nil }
+
+// Dialer knows how to open a streaming connection to a server.
+type Dialer interface {
+
+ // Dial opens a streaming connection to a server using one of the protocols
+ // specified (in order of most preferred to least preferred).
+ Dial(protocols ...string) (Connection, string, error)
+}
+
+// UpgradeRoundTripper is a type of http.RoundTripper that is able to upgrade
+// HTTP requests to support multiplexed bidirectional streams. After RoundTrip()
+// is invoked, if the upgrade is successful, clients may retrieve the upgraded
+// connection by calling UpgradeRoundTripper.Connection().
+type UpgradeRoundTripper interface {
+ http.RoundTripper
+ // NewConnection validates the response and creates a new Connection.
+ NewConnection(resp *http.Response) (Connection, error)
+}
+
+// ResponseUpgrader knows how to upgrade HTTP requests and responses to
+// add streaming support to them.
+type ResponseUpgrader interface {
+ // UpgradeResponse upgrades an HTTP response to one that supports multiplexed
+ // streams. newStreamHandler will be called asynchronously whenever the
+ // other end of the upgraded connection creates a new stream.
+ UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler NewStreamHandler) Connection
+}
+
+// Connection represents an upgraded HTTP connection.
+type Connection interface {
+ // CreateStream creates a new Stream with the supplied headers.
+ CreateStream(headers http.Header) (Stream, error)
+ // Close resets all streams and closes the connection.
+ Close() error
+ // CloseChan returns a channel that is closed when the underlying connection is closed.
+ CloseChan() <-chan bool
+ // SetIdleTimeout sets the amount of time the connection may remain idle before
+ // it is automatically closed.
+ SetIdleTimeout(timeout time.Duration)
+}
+
+// Stream represents a bidirectional communications channel that is part of an
+// upgraded connection.
+type Stream interface {
+ io.ReadWriteCloser
+ // Reset closes both directions of the stream, indicating that neither client
+ // or server can use it any more.
+ Reset() error
+ // Headers returns the headers used to create the stream.
+ Headers() http.Header
+ // Identifier returns the stream's ID.
+ Identifier() uint32
+}
+
+// IsUpgradeRequest returns true if the given request is a connection upgrade request
+func IsUpgradeRequest(req *http.Request) bool {
+ for _, h := range req.Header[http.CanonicalHeaderKey(HeaderConnection)] {
+ if strings.Contains(strings.ToLower(h), strings.ToLower(HeaderUpgrade)) {
+ return true
+ }
+ }
+ return false
+}
+
+func negotiateProtocol(clientProtocols, serverProtocols []string) string {
+ for i := range clientProtocols {
+ for j := range serverProtocols {
+ if clientProtocols[i] == serverProtocols[j] {
+ return clientProtocols[i]
+ }
+ }
+ }
+ return ""
+}
+
+// Handshake performs a subprotocol negotiation. If the client did request a
+// subprotocol, Handshake will select the first common value found in
+// serverProtocols. If a match is found, Handshake adds a response header
+// indicating the chosen subprotocol. If no match is found, HTTP forbidden is
+// returned, along with a response header containing the list of protocols the
+// server can accept.
+func Handshake(req *http.Request, w http.ResponseWriter, serverProtocols []string) (string, error) {
+ clientProtocols := req.Header[http.CanonicalHeaderKey(HeaderProtocolVersion)]
+ if len(clientProtocols) == 0 {
+ // Kube 1.0 clients didn't support subprotocol negotiation.
+ // TODO require clientProtocols once Kube 1.0 is no longer supported
+ return "", nil
+ }
+
+ if len(serverProtocols) == 0 {
+ // Kube 1.0 servers didn't support subprotocol negotiation. This is mainly for testing.
+ // TODO require serverProtocols once Kube 1.0 is no longer supported
+ return "", nil
+ }
+
+ negotiatedProtocol := negotiateProtocol(clientProtocols, serverProtocols)
+ if len(negotiatedProtocol) == 0 {
+ for i := range serverProtocols {
+ w.Header().Add(HeaderAcceptedProtocolVersions, serverProtocols[i])
+ }
+ err := fmt.Errorf("unable to upgrade: unable to negotiate protocol: client supports %v, server accepts %v", clientProtocols, serverProtocols)
+ http.Error(w, err.Error(), http.StatusForbidden)
+ return "", err
+ }
+
+ w.Header().Add(HeaderProtocolVersion, negotiatedProtocol)
+ return negotiatedProtocol, nil
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
new file mode 100644
index 00000000000..9d222faa898
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/connection.go
@@ -0,0 +1,145 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+ "net"
+ "net/http"
+ "sync"
+ "time"
+
+ "github.com/docker/spdystream"
+ "k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/klog"
+)
+
+// connection maintains state about a spdystream.Connection and its associated
+// streams.
+type connection struct {
+ conn *spdystream.Connection
+ streams []httpstream.Stream
+ streamLock sync.Mutex
+ newStreamHandler httpstream.NewStreamHandler
+}
+
+// NewClientConnection creates a new SPDY client connection.
+func NewClientConnection(conn net.Conn) (httpstream.Connection, error) {
+ spdyConn, err := spdystream.NewConnection(conn, false)
+ if err != nil {
+ defer conn.Close()
+ return nil, err
+ }
+
+ return newConnection(spdyConn, httpstream.NoOpNewStreamHandler), nil
+}
+
+// NewServerConnection creates a new SPDY server connection. newStreamHandler
+// will be invoked when the server receives a newly created stream from the
+// client.
+func NewServerConnection(conn net.Conn, newStreamHandler httpstream.NewStreamHandler) (httpstream.Connection, error) {
+ spdyConn, err := spdystream.NewConnection(conn, true)
+ if err != nil {
+ defer conn.Close()
+ return nil, err
+ }
+
+ return newConnection(spdyConn, newStreamHandler), nil
+}
+
+// newConnection returns a new connection wrapping conn. newStreamHandler
+// will be invoked when the server receives a newly created stream from the
+// client.
+func newConnection(conn *spdystream.Connection, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
+ c := &connection{conn: conn, newStreamHandler: newStreamHandler}
+ go conn.Serve(c.newSpdyStream)
+ return c
+}
+
+// createStreamResponseTimeout indicates how long to wait for the other side to
+// acknowledge the new stream before timing out.
+const createStreamResponseTimeout = 30 * time.Second
+
+// Close first sends a reset for all of the connection's streams, and then
+// closes the underlying spdystream.Connection.
+func (c *connection) Close() error {
+ c.streamLock.Lock()
+ for _, s := range c.streams {
+ // calling Reset instead of Close ensures that all streams are fully torn down
+ s.Reset()
+ }
+ c.streams = make([]httpstream.Stream, 0)
+ c.streamLock.Unlock()
+
+ // now that all streams are fully torn down, it's safe to call close on the underlying connection,
+ // which should be able to terminate immediately at this point, instead of waiting for any
+ // remaining graceful stream termination.
+ return c.conn.Close()
+}
+
+// CreateStream creates a new stream with the specified headers and registers
+// it with the connection.
+func (c *connection) CreateStream(headers http.Header) (httpstream.Stream, error) {
+ stream, err := c.conn.CreateStream(headers, nil, false)
+ if err != nil {
+ return nil, err
+ }
+ if err = stream.WaitTimeout(createStreamResponseTimeout); err != nil {
+ return nil, err
+ }
+
+ c.registerStream(stream)
+ return stream, nil
+}
+
+// registerStream adds the stream s to the connection's list of streams that
+// it owns.
+func (c *connection) registerStream(s httpstream.Stream) {
+ c.streamLock.Lock()
+ c.streams = append(c.streams, s)
+ c.streamLock.Unlock()
+}
+
+// CloseChan returns a channel that, when closed, indicates that the underlying
+// spdystream.Connection has been closed.
+func (c *connection) CloseChan() <-chan bool {
+ return c.conn.CloseChan()
+}
+
+// newSpdyStream is the internal new stream handler used by spdystream.Connection.Serve.
+// It calls connection's newStreamHandler, giving it the opportunity to accept or reject
+// the stream. If newStreamHandler returns an error, the stream is rejected. If not, the
+// stream is accepted and registered with the connection.
+func (c *connection) newSpdyStream(stream *spdystream.Stream) {
+ replySent := make(chan struct{})
+ err := c.newStreamHandler(stream, replySent)
+ rejectStream := (err != nil)
+ if rejectStream {
+ klog.Warningf("Stream rejected: %v", err)
+ stream.Reset()
+ return
+ }
+
+ c.registerStream(stream)
+ stream.SendReply(http.Header{}, rejectStream)
+ close(replySent)
+}
+
+// SetIdleTimeout sets the amount of time the connection may remain idle before
+// it is automatically closed.
+func (c *connection) SetIdleTimeout(timeout time.Duration) {
+ c.conn.SetIdleTimeout(timeout)
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
new file mode 100644
index 00000000000..2699597e7a5
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/roundtripper.go
@@ -0,0 +1,335 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+ "bufio"
+ "bytes"
+ "context"
+ "crypto/tls"
+ "encoding/base64"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "net/http/httputil"
+ "net/url"
+ "strings"
+
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/runtime/serializer"
+ "k8s.io/apimachinery/pkg/util/httpstream"
+ utilnet "k8s.io/apimachinery/pkg/util/net"
+ "k8s.io/apimachinery/third_party/forked/golang/netutil"
+)
+
+// SpdyRoundTripper knows how to upgrade an HTTP request to one that supports
+// multiplexed streams. After RoundTrip() is invoked, Conn will be set
+// and usable. SpdyRoundTripper implements the UpgradeRoundTripper interface.
+type SpdyRoundTripper struct {
+ //tlsConfig holds the TLS configuration settings to use when connecting
+ //to the remote server.
+ tlsConfig *tls.Config
+
+ /* TODO according to http://golang.org/pkg/net/http/#RoundTripper, a RoundTripper
+ must be safe for use by multiple concurrent goroutines. If this is absolutely
+ necessary, we could keep a map from http.Request to net.Conn. In practice,
+ a client will create an http.Client, set the transport to a new insteace of
+ SpdyRoundTripper, and use it a single time, so this hopefully won't be an issue.
+ */
+ // conn is the underlying network connection to the remote server.
+ conn net.Conn
+
+ // Dialer is the dialer used to connect. Used if non-nil.
+ Dialer *net.Dialer
+
+ // proxier knows which proxy to use given a request, defaults to http.ProxyFromEnvironment
+ // Used primarily for mocking the proxy discovery in tests.
+ proxier func(req *http.Request) (*url.URL, error)
+
+ // followRedirects indicates if the round tripper should examine responses for redirects and
+ // follow them.
+ followRedirects bool
+ // requireSameHostRedirects restricts redirect following to only follow redirects to the same host
+ // as the original request.
+ requireSameHostRedirects bool
+}
+
+var _ utilnet.TLSClientConfigHolder = &SpdyRoundTripper{}
+var _ httpstream.UpgradeRoundTripper = &SpdyRoundTripper{}
+var _ utilnet.Dialer = &SpdyRoundTripper{}
+
+// NewRoundTripper creates a new SpdyRoundTripper that will use
+// the specified tlsConfig.
+func NewRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) httpstream.UpgradeRoundTripper {
+ return NewSpdyRoundTripper(tlsConfig, followRedirects, requireSameHostRedirects)
+}
+
+// NewSpdyRoundTripper creates a new SpdyRoundTripper that will use
+// the specified tlsConfig. This function is mostly meant for unit tests.
+func NewSpdyRoundTripper(tlsConfig *tls.Config, followRedirects, requireSameHostRedirects bool) *SpdyRoundTripper {
+ return &SpdyRoundTripper{
+ tlsConfig: tlsConfig,
+ followRedirects: followRedirects,
+ requireSameHostRedirects: requireSameHostRedirects,
+ }
+}
+
+// TLSClientConfig implements pkg/util/net.TLSClientConfigHolder for proper TLS checking during
+// proxying with a spdy roundtripper.
+func (s *SpdyRoundTripper) TLSClientConfig() *tls.Config {
+ return s.tlsConfig
+}
+
+// Dial implements k8s.io/apimachinery/pkg/util/net.Dialer.
+func (s *SpdyRoundTripper) Dial(req *http.Request) (net.Conn, error) {
+ conn, err := s.dial(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if err := req.Write(conn); err != nil {
+ conn.Close()
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+// dial dials the host specified by req, using TLS if appropriate, optionally
+// using a proxy server if one is configured via environment variables.
+func (s *SpdyRoundTripper) dial(req *http.Request) (net.Conn, error) {
+ proxier := s.proxier
+ if proxier == nil {
+ proxier = utilnet.NewProxierWithNoProxyCIDR(http.ProxyFromEnvironment)
+ }
+ proxyURL, err := proxier(req)
+ if err != nil {
+ return nil, err
+ }
+
+ if proxyURL == nil {
+ return s.dialWithoutProxy(req.Context(), req.URL)
+ }
+
+ // ensure we use a canonical host with proxyReq
+ targetHost := netutil.CanonicalAddr(req.URL)
+
+ // proxying logic adapted from http://blog.h6t.eu/post/74098062923/golang-websocket-with-http-proxy-support
+ proxyReq := http.Request{
+ Method: "CONNECT",
+ URL: &url.URL{},
+ Host: targetHost,
+ }
+
+ if pa := s.proxyAuth(proxyURL); pa != "" {
+ proxyReq.Header = http.Header{}
+ proxyReq.Header.Set("Proxy-Authorization", pa)
+ }
+
+ proxyDialConn, err := s.dialWithoutProxy(req.Context(), proxyURL)
+ if err != nil {
+ return nil, err
+ }
+
+ proxyClientConn := httputil.NewProxyClientConn(proxyDialConn, nil)
+ _, err = proxyClientConn.Do(&proxyReq)
+ if err != nil && err != httputil.ErrPersistEOF {
+ return nil, err
+ }
+
+ rwc, _ := proxyClientConn.Hijack()
+
+ if req.URL.Scheme != "https" {
+ return rwc, nil
+ }
+
+ host, _, err := net.SplitHostPort(targetHost)
+ if err != nil {
+ return nil, err
+ }
+
+ tlsConfig := s.tlsConfig
+ switch {
+ case tlsConfig == nil:
+ tlsConfig = &tls.Config{ServerName: host}
+ case len(tlsConfig.ServerName) == 0:
+ tlsConfig = tlsConfig.Clone()
+ tlsConfig.ServerName = host
+ }
+
+ tlsConn := tls.Client(rwc, tlsConfig)
+
+ // need to manually call Handshake() so we can call VerifyHostname() below
+ if err := tlsConn.Handshake(); err != nil {
+ return nil, err
+ }
+
+ // Return if we were configured to skip validation
+ if tlsConfig.InsecureSkipVerify {
+ return tlsConn, nil
+ }
+
+ if err := tlsConn.VerifyHostname(tlsConfig.ServerName); err != nil {
+ return nil, err
+ }
+
+ return tlsConn, nil
+}
+
+// dialWithoutProxy dials the host specified by url, using TLS if appropriate.
+func (s *SpdyRoundTripper) dialWithoutProxy(ctx context.Context, url *url.URL) (net.Conn, error) {
+ dialAddr := netutil.CanonicalAddr(url)
+
+ if url.Scheme == "http" {
+ if s.Dialer == nil {
+ var d net.Dialer
+ return d.DialContext(ctx, "tcp", dialAddr)
+ } else {
+ return s.Dialer.DialContext(ctx, "tcp", dialAddr)
+ }
+ }
+
+ // TODO validate the TLSClientConfig is set up?
+ var conn *tls.Conn
+ var err error
+ if s.Dialer == nil {
+ conn, err = tls.Dial("tcp", dialAddr, s.tlsConfig)
+ } else {
+ conn, err = tls.DialWithDialer(s.Dialer, "tcp", dialAddr, s.tlsConfig)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ // Return if we were configured to skip validation
+ if s.tlsConfig != nil && s.tlsConfig.InsecureSkipVerify {
+ return conn, nil
+ }
+
+ host, _, err := net.SplitHostPort(dialAddr)
+ if err != nil {
+ return nil, err
+ }
+ if s.tlsConfig != nil && len(s.tlsConfig.ServerName) > 0 {
+ host = s.tlsConfig.ServerName
+ }
+ err = conn.VerifyHostname(host)
+ if err != nil {
+ return nil, err
+ }
+
+ return conn, nil
+}
+
+// proxyAuth returns, for a given proxy URL, the value to be used for the Proxy-Authorization header
+func (s *SpdyRoundTripper) proxyAuth(proxyURL *url.URL) string {
+ if proxyURL == nil || proxyURL.User == nil {
+ return ""
+ }
+ credentials := proxyURL.User.String()
+ encodedAuth := base64.StdEncoding.EncodeToString([]byte(credentials))
+ return fmt.Sprintf("Basic %s", encodedAuth)
+}
+
+// RoundTrip executes the Request and upgrades it. After a successful upgrade,
+// clients may call SpdyRoundTripper.Connection() to retrieve the upgraded
+// connection.
+func (s *SpdyRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
+ header := utilnet.CloneHeader(req.Header)
+ header.Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
+ header.Add(httpstream.HeaderUpgrade, HeaderSpdy31)
+
+ var (
+ conn net.Conn
+ rawResponse []byte
+ err error
+ )
+
+ if s.followRedirects {
+ conn, rawResponse, err = utilnet.ConnectWithRedirects(req.Method, req.URL, header, req.Body, s, s.requireSameHostRedirects)
+ } else {
+ clone := utilnet.CloneRequest(req)
+ clone.Header = header
+ conn, err = s.Dial(clone)
+ }
+ if err != nil {
+ return nil, err
+ }
+
+ responseReader := bufio.NewReader(
+ io.MultiReader(
+ bytes.NewBuffer(rawResponse),
+ conn,
+ ),
+ )
+
+ resp, err := http.ReadResponse(responseReader, nil)
+ if err != nil {
+ if conn != nil {
+ conn.Close()
+ }
+ return nil, err
+ }
+
+ s.conn = conn
+
+ return resp, nil
+}
+
+// NewConnection validates the upgrade response, creating and returning a new
+// httpstream.Connection if there were no errors.
+func (s *SpdyRoundTripper) NewConnection(resp *http.Response) (httpstream.Connection, error) {
+ connectionHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderConnection))
+ upgradeHeader := strings.ToLower(resp.Header.Get(httpstream.HeaderUpgrade))
+ if (resp.StatusCode != http.StatusSwitchingProtocols) || !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
+ defer resp.Body.Close()
+ responseError := ""
+ responseErrorBytes, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ responseError = "unable to read error from server response"
+ } else {
+ // TODO: I don't belong here, I should be abstracted from this class
+ if obj, _, err := statusCodecs.UniversalDecoder().Decode(responseErrorBytes, nil, &metav1.Status{}); err == nil {
+ if status, ok := obj.(*metav1.Status); ok {
+ return nil, &apierrors.StatusError{ErrStatus: *status}
+ }
+ }
+ responseError = string(responseErrorBytes)
+ responseError = strings.TrimSpace(responseError)
+ }
+
+ return nil, fmt.Errorf("unable to upgrade connection: %s", responseError)
+ }
+
+ return NewClientConnection(s.conn)
+}
+
+// statusScheme is private scheme for the decoding here until someone fixes the TODO in NewConnection
+var statusScheme = runtime.NewScheme()
+
+// ParameterCodec knows about query parameters used with the meta v1 API spec.
+var statusCodecs = serializer.NewCodecFactory(statusScheme)
+
+func init() {
+ statusScheme.AddUnversionedTypes(metav1.SchemeGroupVersion,
+ &metav1.Status{},
+ )
+}
diff --git a/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
new file mode 100644
index 00000000000..045d214d2b7
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/pkg/util/httpstream/spdy/upgrade.go
@@ -0,0 +1,107 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "strings"
+ "sync/atomic"
+
+ "k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+const HeaderSpdy31 = "SPDY/3.1"
+
+// responseUpgrader knows how to upgrade HTTP responses. It
+// implements the httpstream.ResponseUpgrader interface.
+type responseUpgrader struct {
+}
+
+// connWrapper is used to wrap a hijacked connection and its bufio.Reader. All
+// calls will be handled directly by the underlying net.Conn with the exception
+// of Read and Close calls, which will consider data in the bufio.Reader. This
+// ensures that data already inside the used bufio.Reader instance is also
+// read.
+type connWrapper struct {
+ net.Conn
+ closed int32
+ bufReader *bufio.Reader
+}
+
+func (w *connWrapper) Read(b []byte) (n int, err error) {
+ if atomic.LoadInt32(&w.closed) == 1 {
+ return 0, io.EOF
+ }
+ return w.bufReader.Read(b)
+}
+
+func (w *connWrapper) Close() error {
+ err := w.Conn.Close()
+ atomic.StoreInt32(&w.closed, 1)
+ return err
+}
+
+// NewResponseUpgrader returns a new httpstream.ResponseUpgrader that is
+// capable of upgrading HTTP responses using SPDY/3.1 via the
+// spdystream package.
+func NewResponseUpgrader() httpstream.ResponseUpgrader {
+ return responseUpgrader{}
+}
+
+// UpgradeResponse upgrades an HTTP response to one that supports multiplexed
+// streams. newStreamHandler will be called synchronously whenever the
+// other end of the upgraded connection creates a new stream.
+func (u responseUpgrader) UpgradeResponse(w http.ResponseWriter, req *http.Request, newStreamHandler httpstream.NewStreamHandler) httpstream.Connection {
+ connectionHeader := strings.ToLower(req.Header.Get(httpstream.HeaderConnection))
+ upgradeHeader := strings.ToLower(req.Header.Get(httpstream.HeaderUpgrade))
+ if !strings.Contains(connectionHeader, strings.ToLower(httpstream.HeaderUpgrade)) || !strings.Contains(upgradeHeader, strings.ToLower(HeaderSpdy31)) {
+ errorMsg := fmt.Sprintf("unable to upgrade: missing upgrade headers in request: %#v", req.Header)
+ http.Error(w, errorMsg, http.StatusBadRequest)
+ return nil
+ }
+
+ hijacker, ok := w.(http.Hijacker)
+ if !ok {
+ errorMsg := fmt.Sprintf("unable to upgrade: unable to hijack response")
+ http.Error(w, errorMsg, http.StatusInternalServerError)
+ return nil
+ }
+
+ w.Header().Add(httpstream.HeaderConnection, httpstream.HeaderUpgrade)
+ w.Header().Add(httpstream.HeaderUpgrade, HeaderSpdy31)
+ w.WriteHeader(http.StatusSwitchingProtocols)
+
+ conn, bufrw, err := hijacker.Hijack()
+ if err != nil {
+ runtime.HandleError(fmt.Errorf("unable to upgrade: error hijacking response: %v", err))
+ return nil
+ }
+
+ connWithBuf := &connWrapper{Conn: conn, bufReader: bufrw.Reader}
+ spdyConn, err := NewServerConnection(connWithBuf, newStreamHandler)
+ if err != nil {
+ runtime.HandleError(fmt.Errorf("unable to upgrade: error creating SPDY server connection: %v", err))
+ return nil
+ }
+
+ return spdyConn
+}
diff --git a/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
new file mode 100644
index 00000000000..c70f431c272
--- /dev/null
+++ b/vendor/k8s.io/apimachinery/third_party/forked/golang/netutil/addr.go
@@ -0,0 +1,27 @@
+package netutil
+
+import (
+ "net/url"
+ "strings"
+)
+
+// FROM: http://golang.org/src/net/http/client.go
+// Given a string of the form "host", "host:port", or "[ipv6::address]:port",
+// return true if the string includes a port.
+func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") }
+
+// FROM: http://golang.org/src/net/http/transport.go
+var portMap = map[string]string{
+ "http": "80",
+ "https": "443",
+}
+
+// FROM: http://golang.org/src/net/http/transport.go
+// canonicalAddr returns url.Host but always with a ":port" suffix
+func CanonicalAddr(url *url.URL) string {
+ addr := url.Host
+ if !hasPort(addr) {
+ return addr + ":" + portMap[url.Scheme]
+ }
+ return addr
+}
diff --git a/vendor/k8s.io/client-go/tools/portforward/doc.go b/vendor/k8s.io/client-go/tools/portforward/doc.go
new file mode 100644
index 00000000000..2f53406344f
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/portforward/doc.go
@@ -0,0 +1,19 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package portforward adds support for SSH-like port forwarding from the client's
+// local host to remote containers.
+package portforward // import "k8s.io/client-go/tools/portforward"
diff --git a/vendor/k8s.io/client-go/tools/portforward/portforward.go b/vendor/k8s.io/client-go/tools/portforward/portforward.go
new file mode 100644
index 00000000000..ffc0bcac7d4
--- /dev/null
+++ b/vendor/k8s.io/client-go/tools/portforward/portforward.go
@@ -0,0 +1,429 @@
+/*
+Copyright 2015 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package portforward
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+
+ "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/apimachinery/pkg/util/runtime"
+)
+
+// PortForwardProtocolV1Name is the subprotocol used for port forwarding.
+// TODO move to API machinery and re-unify with kubelet/server/portforward
+const PortForwardProtocolV1Name = "portforward.k8s.io"
+
+// PortForwarder knows how to listen for local connections and forward them to
+// a remote pod via an upgraded HTTP request.
+type PortForwarder struct {
+ addresses []listenAddress
+ ports []ForwardedPort
+ stopChan <-chan struct{}
+
+ dialer httpstream.Dialer
+ streamConn httpstream.Connection
+ listeners []io.Closer
+ Ready chan struct{}
+ requestIDLock sync.Mutex
+ requestID int
+ out io.Writer
+ errOut io.Writer
+}
+
+// ForwardedPort contains a Local:Remote port pairing.
+type ForwardedPort struct {
+ Local uint16
+ Remote uint16
+}
+
+/*
+ valid port specifications:
+
+ 5000
+ - forwards from localhost:5000 to pod:5000
+
+ 8888:5000
+ - forwards from localhost:8888 to pod:5000
+
+ 0:5000
+ :5000
+ - selects a random available local port,
+ forwards from localhost: to pod:5000
+*/
+func parsePorts(ports []string) ([]ForwardedPort, error) {
+ var forwards []ForwardedPort
+ for _, portString := range ports {
+ parts := strings.Split(portString, ":")
+ var localString, remoteString string
+ if len(parts) == 1 {
+ localString = parts[0]
+ remoteString = parts[0]
+ } else if len(parts) == 2 {
+ localString = parts[0]
+ if localString == "" {
+ // support :5000
+ localString = "0"
+ }
+ remoteString = parts[1]
+ } else {
+ return nil, fmt.Errorf("invalid port format '%s'", portString)
+ }
+
+ localPort, err := strconv.ParseUint(localString, 10, 16)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing local port '%s': %s", localString, err)
+ }
+
+ remotePort, err := strconv.ParseUint(remoteString, 10, 16)
+ if err != nil {
+ return nil, fmt.Errorf("error parsing remote port '%s': %s", remoteString, err)
+ }
+ if remotePort == 0 {
+ return nil, fmt.Errorf("remote port must be > 0")
+ }
+
+ forwards = append(forwards, ForwardedPort{uint16(localPort), uint16(remotePort)})
+ }
+
+ return forwards, nil
+}
+
+type listenAddress struct {
+ address string
+ protocol string
+ failureMode string
+}
+
+func parseAddresses(addressesToParse []string) ([]listenAddress, error) {
+ var addresses []listenAddress
+ parsed := make(map[string]listenAddress)
+ for _, address := range addressesToParse {
+ if address == "localhost" {
+ if _, exists := parsed["127.0.0.1"]; !exists {
+ ip := listenAddress{address: "127.0.0.1", protocol: "tcp4", failureMode: "all"}
+ parsed[ip.address] = ip
+ }
+ if _, exists := parsed["::1"]; !exists {
+ ip := listenAddress{address: "::1", protocol: "tcp6", failureMode: "all"}
+ parsed[ip.address] = ip
+ }
+ } else if net.ParseIP(address).To4() != nil {
+ parsed[address] = listenAddress{address: address, protocol: "tcp4", failureMode: "any"}
+ } else if net.ParseIP(address) != nil {
+ parsed[address] = listenAddress{address: address, protocol: "tcp6", failureMode: "any"}
+ } else {
+ return nil, fmt.Errorf("%s is not a valid IP", address)
+ }
+ }
+ addresses = make([]listenAddress, len(parsed))
+ id := 0
+ for _, v := range parsed {
+ addresses[id] = v
+ id++
+ }
+ // Sort addresses before returning to get a stable order
+ sort.Slice(addresses, func(i, j int) bool { return addresses[i].address < addresses[j].address })
+
+ return addresses, nil
+}
+
+// New creates a new PortForwarder with localhost listen addresses.
+func New(dialer httpstream.Dialer, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
+ return NewOnAddresses(dialer, []string{"localhost"}, ports, stopChan, readyChan, out, errOut)
+}
+
+// NewOnAddresses creates a new PortForwarder with custom listen addresses.
+func NewOnAddresses(dialer httpstream.Dialer, addresses []string, ports []string, stopChan <-chan struct{}, readyChan chan struct{}, out, errOut io.Writer) (*PortForwarder, error) {
+ if len(addresses) == 0 {
+ return nil, errors.New("you must specify at least 1 address")
+ }
+ parsedAddresses, err := parseAddresses(addresses)
+ if err != nil {
+ return nil, err
+ }
+ if len(ports) == 0 {
+ return nil, errors.New("you must specify at least 1 port")
+ }
+ parsedPorts, err := parsePorts(ports)
+ if err != nil {
+ return nil, err
+ }
+ return &PortForwarder{
+ dialer: dialer,
+ addresses: parsedAddresses,
+ ports: parsedPorts,
+ stopChan: stopChan,
+ Ready: readyChan,
+ out: out,
+ errOut: errOut,
+ }, nil
+}
+
+// ForwardPorts formats and executes a port forwarding request. The connection will remain
+// open until stopChan is closed.
+func (pf *PortForwarder) ForwardPorts() error {
+ defer pf.Close()
+
+ var err error
+ pf.streamConn, _, err = pf.dialer.Dial(PortForwardProtocolV1Name)
+ if err != nil {
+ return fmt.Errorf("error upgrading connection: %s", err)
+ }
+ defer pf.streamConn.Close()
+
+ return pf.forward()
+}
+
+// forward dials the remote host specific in req, upgrades the request, starts
+// listeners for each port specified in ports, and forwards local connections
+// to the remote host via streams.
+func (pf *PortForwarder) forward() error {
+ var err error
+
+ listenSuccess := false
+ for i := range pf.ports {
+ port := &pf.ports[i]
+ err = pf.listenOnPort(port)
+ switch {
+ case err == nil:
+ listenSuccess = true
+ default:
+ if pf.errOut != nil {
+ fmt.Fprintf(pf.errOut, "Unable to listen on port %d: %v\n", port.Local, err)
+ }
+ }
+ }
+
+ if !listenSuccess {
+ return fmt.Errorf("unable to listen on any of the requested ports: %v", pf.ports)
+ }
+
+ if pf.Ready != nil {
+ close(pf.Ready)
+ }
+
+ // wait for interrupt or conn closure
+ select {
+ case <-pf.stopChan:
+ case <-pf.streamConn.CloseChan():
+ runtime.HandleError(errors.New("lost connection to pod"))
+ }
+
+ return nil
+}
+
+// listenOnPort delegates listener creation and waits for connections on requested bind addresses.
+// An error is raised based on address groups (default and localhost) and their failure modes
+func (pf *PortForwarder) listenOnPort(port *ForwardedPort) error {
+ var errors []error
+ failCounters := make(map[string]int, 2)
+ successCounters := make(map[string]int, 2)
+ for _, addr := range pf.addresses {
+ err := pf.listenOnPortAndAddress(port, addr.protocol, addr.address)
+ if err != nil {
+ errors = append(errors, err)
+ failCounters[addr.failureMode]++
+ } else {
+ successCounters[addr.failureMode]++
+ }
+ }
+ if successCounters["all"] == 0 && failCounters["all"] > 0 {
+ return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
+ }
+ if failCounters["any"] > 0 {
+ return fmt.Errorf("%s: %v", "Listeners failed to create with the following errors", errors)
+ }
+ return nil
+}
+
+// listenOnPortAndAddress delegates listener creation and waits for new connections
+// in the background.
+func (pf *PortForwarder) listenOnPortAndAddress(port *ForwardedPort, protocol string, address string) error {
+ listener, err := pf.getListener(protocol, address, port)
+ if err != nil {
+ return err
+ }
+ pf.listeners = append(pf.listeners, listener)
+ go pf.waitForConnection(listener, *port)
+ return nil
+}
+
+// getListener creates a listener on the interface targeted by the given hostname on the given port with
+// the given protocol. protocol is in net.Listen style which basically admits values like tcp, tcp4, tcp6
+func (pf *PortForwarder) getListener(protocol string, hostname string, port *ForwardedPort) (net.Listener, error) {
+ listener, err := net.Listen(protocol, net.JoinHostPort(hostname, strconv.Itoa(int(port.Local))))
+ if err != nil {
+ return nil, fmt.Errorf("unable to create listener: Error %s", err)
+ }
+ listenerAddress := listener.Addr().String()
+ host, localPort, _ := net.SplitHostPort(listenerAddress)
+ localPortUInt, err := strconv.ParseUint(localPort, 10, 16)
+
+ if err != nil {
+ fmt.Fprintf(pf.out, "Failed to forward from %s:%d -> %d\n", hostname, localPortUInt, port.Remote)
+ return nil, fmt.Errorf("error parsing local port: %s from %s (%s)", err, listenerAddress, host)
+ }
+ port.Local = uint16(localPortUInt)
+ if pf.out != nil {
+ fmt.Fprintf(pf.out, "Forwarding from %s -> %d\n", net.JoinHostPort(hostname, strconv.Itoa(int(localPortUInt))), port.Remote)
+ }
+
+ return listener, nil
+}
+
+// waitForConnection waits for new connections to listener and handles them in
+// the background.
+func (pf *PortForwarder) waitForConnection(listener net.Listener, port ForwardedPort) {
+ for {
+ conn, err := listener.Accept()
+ if err != nil {
+ // TODO consider using something like https://github.com/hydrogen18/stoppableListener?
+ if !strings.Contains(strings.ToLower(err.Error()), "use of closed network connection") {
+ runtime.HandleError(fmt.Errorf("error accepting connection on port %d: %v", port.Local, err))
+ }
+ return
+ }
+ go pf.handleConnection(conn, port)
+ }
+}
+
+func (pf *PortForwarder) nextRequestID() int {
+ pf.requestIDLock.Lock()
+ defer pf.requestIDLock.Unlock()
+ id := pf.requestID
+ pf.requestID++
+ return id
+}
+
+// handleConnection copies data between the local connection and the stream to
+// the remote server.
+func (pf *PortForwarder) handleConnection(conn net.Conn, port ForwardedPort) {
+ defer conn.Close()
+
+ if pf.out != nil {
+ fmt.Fprintf(pf.out, "Handling connection for %d\n", port.Local)
+ }
+
+ requestID := pf.nextRequestID()
+
+ // create error stream
+ headers := http.Header{}
+ headers.Set(v1.StreamType, v1.StreamTypeError)
+ headers.Set(v1.PortHeader, fmt.Sprintf("%d", port.Remote))
+ headers.Set(v1.PortForwardRequestIDHeader, strconv.Itoa(requestID))
+ errorStream, err := pf.streamConn.CreateStream(headers)
+ if err != nil {
+ runtime.HandleError(fmt.Errorf("error creating error stream for port %d -> %d: %v", port.Local, port.Remote, err))
+ return
+ }
+ // we're not writing to this stream
+ errorStream.Close()
+
+ errorChan := make(chan error)
+ go func() {
+ message, err := ioutil.ReadAll(errorStream)
+ switch {
+ case err != nil:
+ errorChan <- fmt.Errorf("error reading from error stream for port %d -> %d: %v", port.Local, port.Remote, err)
+ case len(message) > 0:
+ errorChan <- fmt.Errorf("an error occurred forwarding %d -> %d: %v", port.Local, port.Remote, string(message))
+ }
+ close(errorChan)
+ }()
+
+ // create data stream
+ headers.Set(v1.StreamType, v1.StreamTypeData)
+ dataStream, err := pf.streamConn.CreateStream(headers)
+ if err != nil {
+ runtime.HandleError(fmt.Errorf("error creating forwarding stream for port %d -> %d: %v", port.Local, port.Remote, err))
+ return
+ }
+
+ localError := make(chan struct{})
+ remoteDone := make(chan struct{})
+
+ go func() {
+ // Copy from the remote side to the local port.
+ if _, err := io.Copy(conn, dataStream); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
+ runtime.HandleError(fmt.Errorf("error copying from remote stream to local connection: %v", err))
+ }
+
+ // inform the select below that the remote copy is done
+ close(remoteDone)
+ }()
+
+ go func() {
+ // inform server we're not sending any more data after copy unblocks
+ defer dataStream.Close()
+
+ // Copy from the local port to the remote side.
+ if _, err := io.Copy(dataStream, conn); err != nil && !strings.Contains(err.Error(), "use of closed network connection") {
+ runtime.HandleError(fmt.Errorf("error copying from local connection to remote stream: %v", err))
+ // break out of the select below without waiting for the other copy to finish
+ close(localError)
+ }
+ }()
+
+ // wait for either a local->remote error or for copying from remote->local to finish
+ select {
+ case <-remoteDone:
+ case <-localError:
+ }
+
+ // always expect something on errorChan (it may be nil)
+ err = <-errorChan
+ if err != nil {
+ runtime.HandleError(err)
+ }
+}
+
+// Close stops all listeners of PortForwarder.
+func (pf *PortForwarder) Close() {
+ // stop all listeners
+ for _, l := range pf.listeners {
+ if err := l.Close(); err != nil {
+ runtime.HandleError(fmt.Errorf("error closing listener: %v", err))
+ }
+ }
+}
+
+// GetPorts will return the ports that were forwarded; this can be used to
+// retrieve the locally-bound port in cases where the input was port 0. This
+// function will signal an error if the Ready channel is nil or if the
+// listeners are not ready yet; this function will succeed after the Ready
+// channel has been closed.
+func (pf *PortForwarder) GetPorts() ([]ForwardedPort, error) {
+ if pf.Ready == nil {
+ return nil, fmt.Errorf("no Ready channel provided")
+ }
+ select {
+ case <-pf.Ready:
+ return pf.ports, nil
+ default:
+ return nil, fmt.Errorf("listeners not ready")
+ }
+}
diff --git a/vendor/k8s.io/client-go/transport/spdy/spdy.go b/vendor/k8s.io/client-go/transport/spdy/spdy.go
new file mode 100644
index 00000000000..53cc7ee18c5
--- /dev/null
+++ b/vendor/k8s.io/client-go/transport/spdy/spdy.go
@@ -0,0 +1,94 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package spdy
+
+import (
+ "fmt"
+ "net/http"
+ "net/url"
+
+ "k8s.io/apimachinery/pkg/util/httpstream"
+ "k8s.io/apimachinery/pkg/util/httpstream/spdy"
+ restclient "k8s.io/client-go/rest"
+)
+
+// Upgrader validates a response from the server after a SPDY upgrade.
+type Upgrader interface {
+ // NewConnection validates the response and creates a new Connection.
+ NewConnection(resp *http.Response) (httpstream.Connection, error)
+}
+
+// RoundTripperFor returns a round tripper and upgrader to use with SPDY.
+func RoundTripperFor(config *restclient.Config) (http.RoundTripper, Upgrader, error) {
+ tlsConfig, err := restclient.TLSConfigFor(config)
+ if err != nil {
+ return nil, nil, err
+ }
+ upgradeRoundTripper := spdy.NewRoundTripper(tlsConfig, true, false)
+ wrapper, err := restclient.HTTPWrappersForConfig(config, upgradeRoundTripper)
+ if err != nil {
+ return nil, nil, err
+ }
+ return wrapper, upgradeRoundTripper, nil
+}
+
+// dialer implements the httpstream.Dialer interface.
+type dialer struct {
+ client *http.Client
+ upgrader Upgrader
+ method string
+ url *url.URL
+}
+
+var _ httpstream.Dialer = &dialer{}
+
+// NewDialer will create a dialer that connects to the provided URL and upgrades the connection to SPDY.
+func NewDialer(upgrader Upgrader, client *http.Client, method string, url *url.URL) httpstream.Dialer {
+ return &dialer{
+ client: client,
+ upgrader: upgrader,
+ method: method,
+ url: url,
+ }
+}
+
+func (d *dialer) Dial(protocols ...string) (httpstream.Connection, string, error) {
+ req, err := http.NewRequest(d.method, d.url.String(), nil)
+ if err != nil {
+ return nil, "", fmt.Errorf("error creating request: %v", err)
+ }
+ return Negotiate(d.upgrader, d.client, req, protocols...)
+}
+
+// Negotiate opens a connection to a remote server and attempts to negotiate
+// a SPDY connection. Upon success, it returns the connection and the protocol selected by
+// the server. The client transport must use the upgradeRoundTripper - see RoundTripperFor.
+func Negotiate(upgrader Upgrader, client *http.Client, req *http.Request, protocols ...string) (httpstream.Connection, string, error) {
+ for i := range protocols {
+ req.Header.Add(httpstream.HeaderProtocolVersion, protocols[i])
+ }
+ resp, err := client.Do(req)
+ if err != nil {
+ return nil, "", fmt.Errorf("error sending request: %v", err)
+ }
+ defer resp.Body.Close()
+ conn, err := upgrader.NewConnection(resp)
+ if err != nil {
+ return nil, "", err
+ }
+ return conn, resp.Header.Get(httpstream.HeaderProtocolVersion), nil
+}
diff --git a/vendor/knative.dev/pkg/apis/contexts.go b/vendor/knative.dev/pkg/apis/contexts.go
index 4e8b60d6a47..b71878fd081 100644
--- a/vendor/knative.dev/pkg/apis/contexts.go
+++ b/vendor/knative.dev/pkg/apis/contexts.go
@@ -214,8 +214,7 @@ func IsDifferentNamespaceAllowed(ctx context.Context) bool {
return ctx.Value(allowDifferentNamespace{}) != nil
}
-// This is attached to contexts passed to webhook interfaces when the user
-// has requested DryRun mode.
+// This is attached to contexts passed to webhook interfaces when the user has requested DryRun mode.
type isDryRun struct{}
// WithDryRun is used to indicate that this call is in DryRun mode.
diff --git a/vendor/knative.dev/pkg/apis/duck/proxy.go b/vendor/knative.dev/pkg/apis/duck/proxy.go
new file mode 100644
index 00000000000..85a795c361a
--- /dev/null
+++ b/vendor/knative.dev/pkg/apis/duck/proxy.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2018 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package duck
+
+import (
+ "sync"
+
+ "k8s.io/apimachinery/pkg/watch"
+)
+
+// NewProxyWatcher is based on the same concept from Kubernetes apimachinery in 1.12 here:
+// https://github.com/kubernetes/apimachinery/blob/c6dd271be/pkg/watch/watch.go#L272
+// Replace this copy once we've updated our client libraries.
+
+// proxyWatcher lets you wrap your channel in watch.Interface. Threadsafe.
+type proxyWatcher struct {
+ result chan watch.Event
+ stopCh chan struct{}
+
+ mutex sync.Mutex
+ stopped bool
+}
+
+var _ watch.Interface = (*proxyWatcher)(nil)
+
+// NewProxyWatcher creates new proxyWatcher by wrapping a channel
+func NewProxyWatcher(ch chan watch.Event) watch.Interface {
+ return &proxyWatcher{
+ result: ch,
+ stopCh: make(chan struct{}),
+ stopped: false,
+ }
+}
+
+// Stop implements Interface
+func (pw *proxyWatcher) Stop() {
+ pw.mutex.Lock()
+ defer pw.mutex.Unlock()
+ if !pw.stopped {
+ pw.stopped = true
+ close(pw.stopCh)
+ }
+}
+
+// Stopping returns true if Stop() has been called
+func (pw *proxyWatcher) Stopping() bool {
+ pw.mutex.Lock()
+ defer pw.mutex.Unlock()
+ return pw.stopped
+}
+
+// ResultChan implements watch.Interface
+func (pw *proxyWatcher) ResultChan() <-chan watch.Event {
+ return pw.result
+}
+
+// StopChan returns stop channel
+func (pw *proxyWatcher) StopChan() <-chan struct{} {
+ return pw.stopCh
+}
diff --git a/vendor/knative.dev/pkg/apis/duck/typed.go b/vendor/knative.dev/pkg/apis/duck/typed.go
index d6fa034515e..5e1644a939d 100644
--- a/vendor/knative.dev/pkg/apis/duck/typed.go
+++ b/vendor/knative.dev/pkg/apis/duck/typed.go
@@ -134,6 +134,6 @@ func AsStructuredWatcher(wf cache.WatchFunc, obj runtime.Object) cache.WatchFunc
}
}()
- return watch.NewProxyWatcher(structuredCh), nil
+ return NewProxyWatcher(structuredCh), nil
}
}
diff --git a/vendor/knative.dev/pkg/apis/duck/v1/status_types.go b/vendor/knative.dev/pkg/apis/duck/v1/status_types.go
index bb662eeea34..5ea75769482 100644
--- a/vendor/knative.dev/pkg/apis/duck/v1/status_types.go
+++ b/vendor/knative.dev/pkg/apis/duck/v1/status_types.go
@@ -74,7 +74,7 @@ func (*Conditions) GetFullType() ducktypes.Populatable {
return &KResource{}
}
-// GetCondition fetches a copy of the condition of the specified type.
+// GetCondition fetches the condition of the specified type.
func (s *Status) GetCondition(t apis.ConditionType) *apis.Condition {
for _, cond := range s.Conditions {
if cond.Type == t {
diff --git a/vendor/knative.dev/pkg/apis/volatile_time.go b/vendor/knative.dev/pkg/apis/volatile_time.go
index b114294bf9b..d735ada99d1 100644
--- a/vendor/knative.dev/pkg/apis/volatile_time.go
+++ b/vendor/knative.dev/pkg/apis/volatile_time.go
@@ -29,7 +29,7 @@ import (
// Note, go-cmp will still return inequality, see unit test if you
// need this behavior for go-cmp.
type VolatileTime struct {
- Inner metav1.Time `json:",inline"`
+ Inner metav1.Time
}
// MarshalJSON implements the json.Marshaler interface.
diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/reconciler.go b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/reconciler.go
index db9df67def8..b4b733093a2 100644
--- a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/reconciler.go
+++ b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/reconciler.go
@@ -37,7 +37,6 @@ import (
sets "k8s.io/apimachinery/pkg/util/sets"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
- kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
@@ -86,6 +85,13 @@ type ReadOnlyFinalizer interface {
type doReconcile func(ctx context.Context, o *v1.CustomResourceDefinition) reconciler.Event
+const (
+ doReconcileKind = "ReconcileKind"
+ doFinalizeKind = "FinalizeKind"
+ doObserveKind = "ObserveKind"
+ doObserveFinalizeKind = "ObserveFinalizeKind"
+)
+
// reconcilerImpl implements controller.Reconciler for v1.CustomResourceDefinition resources.
type reconcilerImpl struct {
// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware
@@ -225,7 +231,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
// Append the target method to the logger.
logger = logger.With(zap.String("targetMethod", name))
switch name {
- case reconciler.DoReconcileKind:
+ case doReconcileKind:
// Append the target method to the logger.
logger = logger.With(zap.String("targetMethod", "ReconcileKind"))
@@ -239,7 +245,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
// updates regardless of whether the reconciliation errored out.
reconcileEvent = do(ctx, resource)
- case reconciler.DoFinalizeKind:
+ case "FinalizeKind":
// For finalizing reconcilers, if this resource being marked for deletion
// and reconciled cleanly (nil or normal event), remove the finalizer.
reconcileEvent = do(ctx, resource)
@@ -248,7 +254,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
return fmt.Errorf("failed to clear finalizers: %w", err)
}
- case reconciler.DoObserveKind, reconciler.DoObserveFinalizeKind:
+ case "ObserveKind", "ObserveFinalizeKind":
// Observe any changes to this resource, since we are not the leader.
reconcileEvent = do(ctx, resource)
@@ -269,7 +275,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
// the elected leader is expected to write modifications.
logger.Warn("Saw status changes when we aren't the leader!")
default:
- if err = r.updateStatus(ctx, original, resource); err != nil {
+ if err = r.updateStatus(original, resource); err != nil {
logger.Warnw("Failed to update resource status", zap.Error(err))
r.Recorder.Eventf(resource, corev1.EventTypeWarning, "UpdateFailed",
"Failed to update status for %q: %v", resource.Name, err)
@@ -299,7 +305,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
return nil
}
-func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1.CustomResourceDefinition, desired *v1.CustomResourceDefinition) error {
+func (r *reconcilerImpl) updateStatus(existing *v1.CustomResourceDefinition, desired *v1.CustomResourceDefinition) error {
existing = existing.DeepCopy()
return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
@@ -318,10 +324,6 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1.CustomRe
return nil
}
- if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
- logging.FromContext(ctx).Debugf("Updating status with: %s", diff)
- }
-
existing.Status = desired.Status
updater := r.Client.ApiextensionsV1().CustomResourceDefinitions()
diff --git a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/state.go b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/state.go
index 01839c691d7..f25bba83288 100644
--- a/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/state.go
+++ b/vendor/knative.dev/pkg/client/injection/apiextensions/reconciler/apiextensions/v1/customresourcedefinition/state.go
@@ -24,7 +24,6 @@ import (
v1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
- reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
@@ -93,14 +92,14 @@ func (s *state) isNotLeaderNorObserver() bool {
func (s *state) reconcileMethodFor(o *v1.CustomResourceDefinition) (string, doReconcile) {
if o.GetDeletionTimestamp().IsZero() {
if s.isLeader {
- return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
+ return doReconcileKind, s.reconciler.ReconcileKind
} else if s.isROI {
- return reconciler.DoObserveKind, s.roi.ObserveKind
+ return doObserveKind, s.roi.ObserveKind
}
} else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
- return reconciler.DoFinalizeKind, fin.FinalizeKind
+ return doFinalizeKind, fin.FinalizeKind
} else if !s.isLeader && s.isROF {
- return reconciler.DoObserveFinalizeKind, s.rof.ObserveFinalizeKind
+ return doObserveFinalizeKind, s.rof.ObserveFinalizeKind
}
return "unknown", nil
}
diff --git a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/reconciler.go b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/reconciler.go
index 982d8acc4bd..17dd45424e4 100644
--- a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/reconciler.go
+++ b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/reconciler.go
@@ -36,7 +36,6 @@ import (
corev1 "k8s.io/client-go/listers/core/v1"
record "k8s.io/client-go/tools/record"
controller "knative.dev/pkg/controller"
- kmp "knative.dev/pkg/kmp"
logging "knative.dev/pkg/logging"
reconciler "knative.dev/pkg/reconciler"
)
@@ -85,6 +84,13 @@ type ReadOnlyFinalizer interface {
type doReconcile func(ctx context.Context, o *v1.Namespace) reconciler.Event
+const (
+ doReconcileKind = "ReconcileKind"
+ doFinalizeKind = "FinalizeKind"
+ doObserveKind = "ObserveKind"
+ doObserveFinalizeKind = "ObserveFinalizeKind"
+)
+
// reconcilerImpl implements controller.Reconciler for v1.Namespace resources.
type reconcilerImpl struct {
// LeaderAwareFuncs is inlined to help us implement reconciler.LeaderAware
@@ -224,7 +230,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
// Append the target method to the logger.
logger = logger.With(zap.String("targetMethod", name))
switch name {
- case reconciler.DoReconcileKind:
+ case doReconcileKind:
// Append the target method to the logger.
logger = logger.With(zap.String("targetMethod", "ReconcileKind"))
@@ -238,7 +244,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
// updates regardless of whether the reconciliation errored out.
reconcileEvent = do(ctx, resource)
- case reconciler.DoFinalizeKind:
+ case doFinalizeKind:
// For finalizing reconcilers, if this resource being marked for deletion
// and reconciled cleanly (nil or normal event), remove the finalizer.
reconcileEvent = do(ctx, resource)
@@ -247,7 +253,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
return fmt.Errorf("failed to clear finalizers: %w", err)
}
- case reconciler.DoObserveKind, reconciler.DoObserveFinalizeKind:
+ case doObserveKind, doObserveFinalizeKind:
// Observe any changes to this resource, since we are not the leader.
reconcileEvent = do(ctx, resource)
@@ -268,7 +274,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
// the elected leader is expected to write modifications.
logger.Warn("Saw status changes when we aren't the leader!")
default:
- if err = r.updateStatus(ctx, original, resource); err != nil {
+ if err = r.updateStatus(original, resource); err != nil {
logger.Warnw("Failed to update resource status", zap.Error(err))
r.Recorder.Eventf(resource, v1.EventTypeWarning, "UpdateFailed",
"Failed to update status for %q: %v", resource.Name, err)
@@ -298,7 +304,7 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, key string) error {
return nil
}
-func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1.Namespace, desired *v1.Namespace) error {
+func (r *reconcilerImpl) updateStatus(existing *v1.Namespace, desired *v1.Namespace) error {
existing = existing.DeepCopy()
return reconciler.RetryUpdateConflicts(func(attempts int) (err error) {
// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
@@ -317,10 +323,6 @@ func (r *reconcilerImpl) updateStatus(ctx context.Context, existing *v1.Namespac
return nil
}
- if diff, err := kmp.SafeDiff(existing.Status, desired.Status); err == nil && diff != "" {
- logging.FromContext(ctx).Debugf("Updating status with: %s", diff)
- }
-
existing.Status = desired.Status
updater := r.Client.CoreV1().Namespaces()
diff --git a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/state.go b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/state.go
index bc383a76b9b..2370803c947 100644
--- a/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/state.go
+++ b/vendor/knative.dev/pkg/client/injection/kube/reconciler/core/v1/namespace/state.go
@@ -24,7 +24,6 @@ import (
v1 "k8s.io/api/core/v1"
types "k8s.io/apimachinery/pkg/types"
cache "k8s.io/client-go/tools/cache"
- reconciler "knative.dev/pkg/reconciler"
)
// state is used to track the state of a reconciler in a single run.
@@ -93,14 +92,14 @@ func (s *state) isNotLeaderNorObserver() bool {
func (s *state) reconcileMethodFor(o *v1.Namespace) (string, doReconcile) {
if o.GetDeletionTimestamp().IsZero() {
if s.isLeader {
- return reconciler.DoReconcileKind, s.reconciler.ReconcileKind
+ return doReconcileKind, s.reconciler.ReconcileKind
} else if s.isROI {
- return reconciler.DoObserveKind, s.roi.ObserveKind
+ return doObserveKind, s.roi.ObserveKind
}
} else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
- return reconciler.DoFinalizeKind, fin.FinalizeKind
+ return doFinalizeKind, fin.FinalizeKind
} else if !s.isLeader && s.isROF {
- return reconciler.DoObserveFinalizeKind, s.rof.ObserveFinalizeKind
+ return doObserveFinalizeKind, s.rof.ObserveFinalizeKind
}
return "unknown", nil
}
diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go
index ac8138188d2..e05d78f2059 100644
--- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go
+++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_reconciler.go
@@ -147,10 +147,6 @@ func (g *reconcilerReconcilerGenerator) GenerateType(c *generator.Context, t *ty
Package: "context",
Name: "Context",
}),
- "kmpSafeDiff": c.Universe.Function(types.Name{
- Package: "knative.dev/pkg/kmp",
- Name: "SafeDiff",
- }),
"fmtErrorf": c.Universe.Package("fmt").Function("Errorf"),
"reflectDeepEqual": c.Universe.Package("reflect").Function("DeepEqual"),
"equalitySemantic": c.Universe.Package("k8s.io/apimachinery/pkg/api/equality").Variable("Semantic"),
@@ -180,22 +176,6 @@ func (g *reconcilerReconcilerGenerator) GenerateType(c *generator.Context, t *ty
Package: "k8s.io/apimachinery/pkg/labels",
Name: "Everything",
}),
- "doReconcileKind": c.Universe.Type(types.Name{
- Package: "knative.dev/pkg/reconciler",
- Name: "DoReconcileKind",
- }),
- "doObserveKind": c.Universe.Type(types.Name{
- Package: "knative.dev/pkg/reconciler",
- Name: "DoObserveKind",
- }),
- "doFinalizeKind": c.Universe.Type(types.Name{
- Package: "knative.dev/pkg/reconciler",
- Name: "DoFinalizeKind",
- }),
- "doObserveFinalizeKind": c.Universe.Type(types.Name{
- Package: "knative.dev/pkg/reconciler",
- Name: "DoObserveFinalizeKind",
- }),
}
sw.Do(reconcilerInterfaceFactory, m)
@@ -252,6 +232,13 @@ type ReadOnlyFinalizer interface {
type doReconcile func(ctx {{.contextContext|raw}}, o *{{.type|raw}}) {{.reconcilerEvent|raw}}
+const (
+ doReconcileKind = "ReconcileKind"
+ doFinalizeKind = "FinalizeKind"
+ doObserveKind = "ObserveKind"
+ doObserveFinalizeKind = "ObserveFinalizeKind"
+)
+
// reconcilerImpl implements controller.Reconciler for {{.type|raw}} resources.
type reconcilerImpl struct {
// LeaderAwareFuncs is inlined to help us implement {{.reconcilerLeaderAware|raw}}
@@ -354,7 +341,7 @@ var reconcilerImplFactory = `
func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) error {
logger := {{.loggingFromContext|raw}}(ctx)
- // Initialize the reconciler state. This will convert the namespace/name
+ // Initialize the reconciler state. This will convert the namespace/name
// string into a distinct namespace and name, determin if this instance of
// the reconciler is the leader, and any additional interfaces implemented
// by the reconciler. Returns an error is the resource key is invalid.
@@ -363,7 +350,7 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro
logger.Errorf("invalid resource key: %s", key)
return nil
}
-
+
// If we are not the leader, and we don't implement either ReadOnly
// observer interfaces, then take a fast-path out.
if s.isNotLeaderNorObserver() {
@@ -411,7 +398,7 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro
// Append the target method to the logger.
logger = logger.With(zap.String("targetMethod", name))
switch name {
- case {{.doReconcileKind|raw}}:
+ case doReconcileKind:
// Append the target method to the logger.
logger = logger.With(zap.String("targetMethod", "ReconcileKind"))
@@ -432,7 +419,7 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro
reconciler.PostProcessReconcile(ctx, resource, original)
{{end}}
- case {{.doFinalizeKind|raw}}:
+ case doFinalizeKind:
// For finalizing reconcilers, if this resource being marked for deletion
// and reconciled cleanly (nil or normal event), remove the finalizer.
reconcileEvent = do(ctx, resource)
@@ -441,7 +428,7 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro
return {{.fmtErrorf|raw}}("failed to clear finalizers: %w", err)
}
- case {{.doObserveKind|raw}}, {{.doObserveFinalizeKind|raw}}:
+ case doObserveKind, doObserveFinalizeKind:
// Observe any changes to this resource, since we are not the leader.
reconcileEvent = do(ctx, resource)
@@ -462,7 +449,7 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro
// the elected leader is expected to write modifications.
logger.Warn("Saw status changes when we aren't the leader!")
default:
- if err = r.updateStatus(ctx, original, resource); err != nil {
+ if err = r.updateStatus(original, resource); err != nil {
logger.Warnw("Failed to update resource status", zap.Error(err))
r.Recorder.Eventf(resource, {{.corev1EventTypeWarning|raw}}, "UpdateFailed",
"Failed to update status for %q: %v", resource.Name, err)
@@ -494,7 +481,7 @@ func (r *reconcilerImpl) Reconcile(ctx {{.contextContext|raw}}, key string) erro
`
var reconcilerStatusFactory = `
-func (r *reconcilerImpl) updateStatus(ctx {{.contextContext|raw}}, existing *{{.type|raw}}, desired *{{.type|raw}}) error {
+func (r *reconcilerImpl) updateStatus(existing *{{.type|raw}}, desired *{{.type|raw}}) error {
existing = existing.DeepCopy()
return {{.reconcilerRetryUpdateConflicts|raw}}(func(attempts int) (err error) {
// The first iteration tries to use the injectionInformer's state, subsequent attempts fetch the latest state via API.
@@ -515,10 +502,6 @@ func (r *reconcilerImpl) updateStatus(ctx {{.contextContext|raw}}, existing *{{.
return nil
}
- if diff, err := {{.kmpSafeDiff|raw}}(existing.Status, desired.Status); err == nil && diff != "" {
- {{.loggingFromContext|raw}}(ctx).Debugf("Updating status with: %s", diff)
- }
-
existing.Status = desired.Status
{{if .nonNamespaced}}
diff --git a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_state.go b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_state.go
index 396e24a20af..0e939a703c4 100644
--- a/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_state.go
+++ b/vendor/knative.dev/pkg/codegen/cmd/injection-gen/generators/reconciler_state.go
@@ -72,22 +72,6 @@ func (g *reconcilerStateGenerator) GenerateType(c *generator.Context, t *types.T
Package: "k8s.io/apimachinery/pkg/types",
Name: "NamespacedName",
}),
- "doReconcileKind": c.Universe.Type(types.Name{
- Package: "knative.dev/pkg/reconciler",
- Name: "DoReconcileKind",
- }),
- "doObserveKind": c.Universe.Type(types.Name{
- Package: "knative.dev/pkg/reconciler",
- Name: "DoObserveKind",
- }),
- "doFinalizeKind": c.Universe.Type(types.Name{
- Package: "knative.dev/pkg/reconciler",
- Name: "DoFinalizeKind",
- }),
- "doObserveFinalizeKind": c.Universe.Type(types.Name{
- Package: "knative.dev/pkg/reconciler",
- Name: "DoObserveFinalizeKind",
- }),
}
sw.Do(reconcilerStateType, m)
@@ -165,14 +149,14 @@ func (s *state) isNotLeaderNorObserver() bool {
func (s *state) reconcileMethodFor(o *{{.type|raw}}) (string, doReconcile) {
if o.GetDeletionTimestamp().IsZero() {
if s.isLeader {
- return {{.doReconcileKind|raw}}, s.reconciler.ReconcileKind
+ return doReconcileKind, s.reconciler.ReconcileKind
} else if s.isROI {
- return {{.doObserveKind|raw}}, s.roi.ObserveKind
+ return doObserveKind, s.roi.ObserveKind
}
} else if fin, ok := s.reconciler.(Finalizer); s.isLeader && ok {
- return {{.doFinalizeKind|raw}}, fin.FinalizeKind
+ return doFinalizeKind, fin.FinalizeKind
} else if !s.isLeader && s.isROF {
- return {{.doObserveFinalizeKind|raw}}, s.rof.ObserveFinalizeKind
+ return doObserveFinalizeKind, s.rof.ObserveFinalizeKind
}
return "unknown", nil
}
diff --git a/vendor/knative.dev/pkg/configmap/informed_watcher.go b/vendor/knative.dev/pkg/configmap/informed_watcher.go
index 16447b63401..3231e7d287c 100644
--- a/vendor/knative.dev/pkg/configmap/informed_watcher.go
+++ b/vendor/knative.dev/pkg/configmap/informed_watcher.go
@@ -33,6 +33,13 @@ import (
"k8s.io/client-go/tools/cache"
)
+// NewDefaultWatcher creates a new default configmap.Watcher instance.
+//
+// Deprecated: Use NewInformedWatcher
+func NewDefaultWatcher(kc kubernetes.Interface, namespace string) *InformedWatcher {
+ return NewInformedWatcher(kc, namespace)
+}
+
// NewInformedWatcherFromFactory watches a Kubernetes namespace for ConfigMap changes.
func NewInformedWatcherFromFactory(sif informers.SharedInformerFactory, namespace string) *InformedWatcher {
return &InformedWatcher{
diff --git a/vendor/knative.dev/pkg/configmap/parse.go b/vendor/knative.dev/pkg/configmap/parse.go
index eba04a11ca7..1ef67581810 100644
--- a/vendor/knative.dev/pkg/configmap/parse.go
+++ b/vendor/knative.dev/pkg/configmap/parse.go
@@ -23,8 +23,6 @@ import (
"time"
"k8s.io/apimachinery/pkg/api/resource"
- "k8s.io/apimachinery/pkg/api/validation"
- "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
)
@@ -146,47 +144,6 @@ func AsQuantity(key string, target **resource.Quantity) ParseFunc {
}
}
-// AsOptionalNamespacedName parses the value at key as a types.NamespacedName into the target, if it exists
-// The namespace and name are both required and expected to be valid DNS labels
-func AsOptionalNamespacedName(key string, target **types.NamespacedName) ParseFunc {
- return func(data map[string]string) error {
- if _, ok := data[key]; !ok {
- return nil
- }
-
- *target = &types.NamespacedName{}
- return AsNamespacedName(key, *target)(data)
- }
-}
-
-// AsNamespacedName parses the value at key as a types.NamespacedName into the target, if it exists
-// The namespace and name are both required and expected to be valid DNS labels
-func AsNamespacedName(key string, target *types.NamespacedName) ParseFunc {
- return func(data map[string]string) error {
- raw, ok := data[key]
- if !ok {
- return nil
- }
-
- v := strings.SplitN(raw, string(types.Separator), 3)
-
- if len(v) != 2 {
- return fmt.Errorf("failed to parse %q: expected 'namespace/name' format", key)
- }
-
- for _, val := range v {
- if errs := validation.ValidateNamespaceName(val, false); len(errs) > 0 {
- return fmt.Errorf("failed to parse %q: %s", key, strings.Join(errs, ", "))
- }
- }
-
- target.Namespace = v[0]
- target.Name = v[1]
-
- return nil
- }
-}
-
// Parse parses the given map using the parser functions passed in.
func Parse(data map[string]string, parsers ...ParseFunc) error {
for _, parse := range parsers {
diff --git a/vendor/knative.dev/pkg/controller/controller.go b/vendor/knative.dev/pkg/controller/controller.go
index 121db856775..5510883417d 100644
--- a/vendor/knative.dev/pkg/controller/controller.go
+++ b/vendor/knative.dev/pkg/controller/controller.go
@@ -205,40 +205,20 @@ type Impl struct {
statsReporter StatsReporter
}
-// ControllerOptions encapsulates options for creating a new controller,
-// including throttling and stats behavior.
-type ControllerOptions struct {
- WorkQueueName string
- Logger *zap.SugaredLogger
- Reporter StatsReporter
- RateLimiter workqueue.RateLimiter
-}
-
// NewImpl instantiates an instance of our controller that will feed work to the
// provided Reconciler as it is enqueued.
func NewImpl(r Reconciler, logger *zap.SugaredLogger, workQueueName string) *Impl {
- return NewImplFull(r, ControllerOptions{WorkQueueName: workQueueName, Logger: logger})
+ return NewImplWithStats(r, logger, workQueueName, MustNewStatsReporter(workQueueName, logger))
}
func NewImplWithStats(r Reconciler, logger *zap.SugaredLogger, workQueueName string, reporter StatsReporter) *Impl {
- return NewImplFull(r, ControllerOptions{WorkQueueName: workQueueName, Logger: logger, Reporter: reporter})
-}
-
-// NewImplFull accepts the full set of options available to all controllers.
-func NewImplFull(r Reconciler, options ControllerOptions) *Impl {
- logger := options.Logger.Named(options.WorkQueueName)
- if options.RateLimiter == nil {
- options.RateLimiter = workqueue.DefaultControllerRateLimiter()
- }
- if options.Reporter == nil {
- options.Reporter = MustNewStatsReporter(options.WorkQueueName, options.Logger)
- }
+ logger = logger.Named(workQueueName)
return &Impl{
- Name: options.WorkQueueName,
+ Name: workQueueName,
Reconciler: r,
- workQueue: newTwoLaneWorkQueue(options.WorkQueueName, options.RateLimiter),
+ workQueue: newTwoLaneWorkQueue(workQueueName),
logger: logger,
- statsReporter: options.Reporter,
+ statsReporter: reporter,
}
}
@@ -262,9 +242,7 @@ func (c *Impl) EnqueueAfter(obj interface{}, after time.Duration) {
// and enqueues that key in the slow lane.
func (c *Impl) EnqueueSlowKey(key types.NamespacedName) {
c.workQueue.SlowLane().Add(key)
- c.logger.With(zap.Object(logkey.Key, logging.NamespacedName(key))).
- Debug("Adding to the slow queue %s (depth(total/slow): %d/%d)",
- safeKey(key), c.workQueue.Len(), c.workQueue.SlowLane().Len())
+ c.logger.Debugf("Adding to the slow queue %s (depth(total/slow): %d/%d)", safeKey(key), c.workQueue.Len(), c.workQueue.SlowLane().Len())
}
// EnqueueSlow extracts namesspeced name from the object and enqueues it on the slow
@@ -390,8 +368,7 @@ func (c *Impl) EnqueueNamespaceOf(obj interface{}) {
// EnqueueKey takes a namespace/name string and puts it onto the work queue.
func (c *Impl) EnqueueKey(key types.NamespacedName) {
c.workQueue.Add(key)
- c.logger.With(zap.Object(logkey.Key, logging.NamespacedName(key))).
- Debugf("Adding to queue %s (depth: %d)", safeKey(key), c.workQueue.Len())
+ c.logger.Debugf("Adding to queue %s (depth: %d)", safeKey(key), c.workQueue.Len())
}
// MaybeEnqueueBucketKey takes a Bucket and namespace/name string and puts it onto
@@ -406,8 +383,7 @@ func (c *Impl) MaybeEnqueueBucketKey(bkt reconciler.Bucket, key types.Namespaced
// the work queue after given delay.
func (c *Impl) EnqueueKeyAfter(key types.NamespacedName, delay time.Duration) {
c.workQueue.AddAfter(key, delay)
- c.logger.With(zap.Object(logkey.Key, logging.NamespacedName(key))).
- Debugf("Adding to queue %s (delay: %v, depth: %d)", safeKey(key), delay, c.workQueue.Len())
+ c.logger.Debugf("Adding to queue %s (delay: %v, depth: %d)", safeKey(key), delay, c.workQueue.Len())
}
// RunContext starts the controller's worker threads, the number of which is threadiness.
diff --git a/vendor/knative.dev/pkg/controller/helper.go b/vendor/knative.dev/pkg/controller/helper.go
index 4a4ca5c6da4..b8cc08fb06b 100644
--- a/vendor/knative.dev/pkg/controller/helper.go
+++ b/vendor/knative.dev/pkg/controller/helper.go
@@ -23,11 +23,8 @@ import (
"knative.dev/pkg/kmeta"
)
-// Callback is a function that is passed to an informer's event handler.
type Callback func(interface{})
-// EnsureTypeMeta augments the passed-in callback, ensuring that all objects that pass
-// through this callback have their TypeMeta set according to the provided GVK.
func EnsureTypeMeta(f Callback, gvk schema.GroupVersionKind) Callback {
apiVersion, kind := gvk.ToAPIVersionAndKind()
@@ -37,23 +34,12 @@ func EnsureTypeMeta(f Callback, gvk schema.GroupVersionKind) Callback {
// TODO: We should consider logging here.
return
}
-
- accessor, err := meta.TypeAccessor(typed)
- if err != nil {
- return
- }
-
- // If TypeMeta is already what we want, exit early.
- if accessor.GetAPIVersion() == apiVersion && accessor.GetKind() == kind {
- f(typed)
- return
- }
-
- // We need to populate TypeMeta, but cannot trample the
+ // We need to populate TypeMeta, but cannot trample the
// informer's copy.
+ // TODO(mattmoor): Avoid the copy if TypeMeta is set.
copy := typed.DeepCopyObject()
- accessor, err = meta.TypeAccessor(copy)
+ accessor, err := meta.TypeAccessor(copy)
if err != nil {
return
}
diff --git a/vendor/knative.dev/pkg/controller/two_lane_queue.go b/vendor/knative.dev/pkg/controller/two_lane_queue.go
index 919cc900679..ebd53df5b83 100644
--- a/vendor/knative.dev/pkg/controller/two_lane_queue.go
+++ b/vendor/knative.dev/pkg/controller/two_lane_queue.go
@@ -37,7 +37,8 @@ type twoLaneQueue struct {
}
// Creates a new twoLaneQueue.
-func newTwoLaneWorkQueue(name string, rl workqueue.RateLimiter) *twoLaneQueue {
+func newTwoLaneWorkQueue(name string) *twoLaneQueue {
+ rl := workqueue.DefaultControllerRateLimiter()
tlq := &twoLaneQueue{
RateLimitingInterface: workqueue.NewNamedRateLimitingQueue(
rl,
diff --git a/vendor/knative.dev/pkg/hack/update-deps.sh b/vendor/knative.dev/pkg/hack/update-deps.sh
index b7e31052b8c..a39fc858fe4 100644
--- a/vendor/knative.dev/pkg/hack/update-deps.sh
+++ b/vendor/knative.dev/pkg/hack/update-deps.sh
@@ -26,7 +26,7 @@ cd ${ROOT_DIR}
# The list of dependencies that we track at HEAD and periodically
# float forward in this repository.
FLOATING_DEPS=(
- "knative.dev/test-infra@release-0.17"
+ "knative.dev/test-infra@master"
)
# Parse flags to determine any we should pass to dep.
diff --git a/vendor/knative.dev/pkg/injection/sharedmain/main.go b/vendor/knative.dev/pkg/injection/sharedmain/main.go
index 526320fcd45..1cbba0264c3 100644
--- a/vendor/knative.dev/pkg/injection/sharedmain/main.go
+++ b/vendor/knative.dev/pkg/injection/sharedmain/main.go
@@ -30,7 +30,6 @@ import (
"go.opencensus.io/stats/view"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
- apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/wait"
@@ -38,9 +37,8 @@ import (
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
- _ "go.uber.org/automaxprocs" // automatically set GOMAXPROCS based on cgroups
"go.uber.org/zap"
-
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
kubeclient "knative.dev/pkg/client/injection/kube/client"
"knative.dev/pkg/configmap"
"knative.dev/pkg/controller"
@@ -190,8 +188,6 @@ func MainWithConfig(ctx context.Context, component string, cfg *rest.Config, cto
if cfg.Burst == 0 {
cfg.Burst = len(ctors) * rest.DefaultBurst
}
- ctx = injection.WithConfig(ctx, cfg)
-
ctx, informers := injection.Default.SetupInformers(ctx, cfg)
logger, atomicLevel := SetupLoggerOrDie(ctx, component)
diff --git a/vendor/knative.dev/pkg/metrics/config.go b/vendor/knative.dev/pkg/metrics/config.go
index 593d4a890d9..a1573cdab8b 100644
--- a/vendor/knative.dev/pkg/metrics/config.go
+++ b/vendor/knative.dev/pkg/metrics/config.go
@@ -156,9 +156,9 @@ func NewStackdriverClientConfigFromMap(config map[string]string) *StackdriverCli
// record applies the `ros` Options to each measurement in `mss` and then records the resulting
// measurements in the metricsConfig's designated backend.
func (mc *metricsConfig) record(ctx context.Context, mss []stats.Measurement, ros ...stats.Options) error {
- if mc == nil || mc.backendDestination == none {
- // Don't record data points if the metric config is not initialized yet or if
- // the defined backend is "none" explicitly.
+ if mc == nil {
+ // Don't record data points if the metric config is not initialized yet.
+ // At this point, it's unclear whether we should record or not.
return nil
}
@@ -292,14 +292,12 @@ func createMetricsConfig(ops ExporterOptions, logger *zap.SugaredLogger) (*metri
return nil, fmt.Errorf("invalid %s value %q", reportingPeriodKey, repStr)
}
mc.reportingPeriod = time.Duration(repInt) * time.Second
- } else {
- switch mc.backendDestination {
- case stackdriver, openCensus:
- mc.reportingPeriod = time.Minute
- case prometheus:
- mc.reportingPeriod = 5 * time.Second
- }
+ } else if mc.backendDestination == stackdriver {
+ mc.reportingPeriod = 60 * time.Second
+ } else if mc.backendDestination == prometheus {
+ mc.reportingPeriod = 5 * time.Second
}
+
return &mc, nil
}
diff --git a/vendor/knative.dev/pkg/metrics/config_observability.go b/vendor/knative.dev/pkg/metrics/config_observability.go
index d17fba89cfa..5bedbac0257 100644
--- a/vendor/knative.dev/pkg/metrics/config_observability.go
+++ b/vendor/knative.dev/pkg/metrics/config_observability.go
@@ -18,7 +18,6 @@ package metrics
import (
"os"
- "strings"
texttemplate "text/template"
corev1 "k8s.io/api/core/v1"
@@ -60,9 +59,6 @@ type ObservabilityConfig struct {
// EnableProfiling indicates whether it is allowed to retrieve runtime profiling data from
// the pods via an HTTP server in the format expected by the pprof visualization tool.
EnableProfiling bool
-
- // EnableRequestLog enables activator/queue-proxy to write request logs.
- EnableRequestLog bool
}
func defaultConfig() *ObservabilityConfig {
@@ -87,17 +83,6 @@ func NewObservabilityConfigFromConfigMap(configMap *corev1.ConfigMap) (*Observab
return nil, err
}
- if raw, ok := configMap.Data["logging.enable-request-log"]; ok {
- if strings.EqualFold(raw, "true") && oc.RequestLogTemplate != "" {
- oc.EnableRequestLog = true
- }
- } else if oc.RequestLogTemplate != "" {
- // TODO: remove this after 0.17 cuts, this is meant only for smooth transition to the new flag.
- // Once 0.17 cuts we should set a proper default value and users will need to set the flag explicitly
- // to enable request logging.
- oc.EnableRequestLog = true
- }
-
if oc.RequestLogTemplate != "" {
// Verify that we get valid templates.
if _, err := texttemplate.New("requestLog").Parse(oc.RequestLogTemplate); err != nil {
diff --git a/vendor/knative.dev/pkg/metrics/exporter.go b/vendor/knative.dev/pkg/metrics/exporter.go
index b3612618676..59ed8364c7f 100644
--- a/vendor/knative.dev/pkg/metrics/exporter.go
+++ b/vendor/knative.dev/pkg/metrics/exporter.go
@@ -240,7 +240,12 @@ func setCurMetricsConfig(c *metricsConfig) {
}
func setCurMetricsConfigUnlocked(c *metricsConfig) {
- setReportingPeriod(c)
+ if c != nil {
+ view.SetReportingPeriod(c.reportingPeriod)
+ } else {
+ // Setting to 0 enables the default behavior.
+ view.SetReportingPeriod(0)
+ }
curMetricsConfig = c
}
diff --git a/vendor/knative.dev/pkg/metrics/metricstest/metricstest.go b/vendor/knative.dev/pkg/metrics/metricstest/metricstest.go
index ca0c82ec2e9..83861e61b68 100644
--- a/vendor/knative.dev/pkg/metrics/metricstest/metricstest.go
+++ b/vendor/knative.dev/pkg/metrics/metricstest/metricstest.go
@@ -118,31 +118,9 @@ func CheckDistributionCount(t test.T, name string, wantTags map[string]string, e
}
-// GetLastValueData returns the last value for the given metric, verifying tags.
-func GetLastValueData(t test.T, name string, tags map[string]string) float64 {
- t.Helper()
- return GetLastValueDataWithMeter(t, name, tags, nil)
-}
-
-// GetLastValueDataWithMeter returns the last value of the given metric using meter, verifying tags.
-func GetLastValueDataWithMeter(t test.T, name string, tags map[string]string, meter view.Meter) float64 {
- t.Helper()
- if row := lastRow(t, name, meter); row != nil {
- checkRowTags(t, row, name, tags)
-
- s, ok := row.Data.(*view.LastValueData)
- if !ok {
- t.Error("want LastValueData", "metric", name, "got", reflect.TypeOf(row.Data))
- }
- return s.Value
- }
- return 0
-}
-
// CheckLastValueData checks the view with a name matching string name to verify that the LastValueData stats
// reported are tagged with the tags in wantTags and that wantValue matches reported last value.
func CheckLastValueData(t test.T, name string, wantTags map[string]string, wantValue float64) {
- t.Helper()
CheckLastValueDataWithMeter(t, name, wantTags, wantValue, nil)
}
@@ -151,8 +129,14 @@ func CheckLastValueData(t test.T, name string, wantTags map[string]string, wantV
// the tags in wantTags and that wantValue matches the last reported value.
func CheckLastValueDataWithMeter(t test.T, name string, wantTags map[string]string, wantValue float64, meter view.Meter) {
t.Helper()
- if v := GetLastValueDataWithMeter(t, name, wantTags, meter); v != wantValue {
- t.Error("Reporter.Report() wrong value", "metric", name, "got", v, "want", wantValue)
+ if row := lastRow(t, name, meter); row != nil {
+ checkRowTags(t, row, name, wantTags)
+
+ if s, ok := row.Data.(*view.LastValueData); !ok {
+ t.Error("want LastValueData", "metric", name, "got", reflect.TypeOf(row.Data))
+ } else if s.Value != wantValue {
+ t.Error("Reporter.Report() wrong value", "metric", name, "got", s.Value, "want", wantValue)
+ }
}
}
diff --git a/vendor/knative.dev/pkg/metrics/metricstest/resource_metrics.go b/vendor/knative.dev/pkg/metrics/metricstest/resource_metrics.go
index bbe622706e3..eeaeda6d7df 100644
--- a/vendor/knative.dev/pkg/metrics/metricstest/resource_metrics.go
+++ b/vendor/knative.dev/pkg/metrics/metricstest/resource_metrics.go
@@ -30,7 +30,6 @@ import (
"go.opencensus.io/metric/metricdata"
"go.opencensus.io/metric/metricproducer"
"go.opencensus.io/resource"
- "go.opencensus.io/stats/view"
)
// Value provides a simplified implementation of a metric Value suitable for
@@ -41,10 +40,6 @@ type Value struct {
Int64 *int64
Float64 *float64
Distribution *metricdata.Distribution
- // VerifyDistributionCountOnly makes Equal compare the Distribution with the
- // field Count only, and ingore all other fields of Distribution.
- // This is ingored when the value is not a Distribution.
- VerifyDistributionCountOnly bool
}
// Metric provides a simplified (for testing) implementation of a metric report
@@ -97,9 +92,7 @@ func NewMetric(metric *metricdata.Metric) Metric {
for _, ts := range metric.TimeSeries {
tags := make(map[string]string, len(metric.Descriptor.LabelKeys))
for i, k := range metric.Descriptor.LabelKeys {
- if ts.LabelValues[i].Present {
- tags[k.Key] = ts.LabelValues[i].Value
- }
+ tags[k.Key] = ts.LabelValues[i].Value
}
v := Value{Tags: tags}
ts.Points[0].ReadValue(&v)
@@ -109,18 +102,6 @@ func NewMetric(metric *metricdata.Metric) Metric {
return value
}
-// EnsureRecorded makes sure that all stats metrics are actually flushed and recorded.
-func EnsureRecorded() {
- // stats.Record queues the actual record to a channel to be accounted for by
- // a background goroutine (nonblocking). Call a method which does a
- // round-trip to that goroutine to ensure that records have been flushed.
- for _, producer := range metricproducer.GlobalManager().GetAll() {
- if meter, ok := producer.(view.Meter); ok {
- meter.Find("nonexistent")
- }
- }
-}
-
// GetMetric returns all values for the named metric.
func GetMetric(name string) []Metric {
producers := metricproducer.GlobalManager().GetAll()
@@ -145,70 +126,57 @@ func GetOneMetric(name string) Metric {
return m[0]
}
-// IntMetric creates an Int64 metric.
-func IntMetric(name string, value int64, tags map[string]string) Metric {
- return Metric{
- Name: name,
- Values: []Value{{Int64: &value, Tags: tags}},
+func genericMetricFactory(name string, v Value, keyvalues ...string) Metric {
+ if len(keyvalues)%2 != 0 {
+ panic("Odd number of arguments to CountMetric")
+ }
+ if v.Tags == nil {
+ v.Tags = make(map[string]string, len(keyvalues)/2)
+ }
+ for i := 0; i < len(keyvalues); i += 2 {
+ v.Tags[keyvalues[i]] = keyvalues[i+1]
}
-}
-
-// FloatMetric creates a Float64 metric
-func FloatMetric(name string, value float64, tags map[string]string) Metric {
return Metric{
Name: name,
- Values: []Value{{Float64: &value, Tags: tags}},
+ Values: []Value{v},
}
}
-// DistributionCountOnlyMetric creates a distrubtion metric for test, and verifying only the count.
-func DistributionCountOnlyMetric(name string, count int64, tags map[string]string) Metric {
- return Metric{
- Name: name,
- Values: []Value{{
- Distribution: &metricdata.Distribution{Count: count},
- Tags: tags,
- VerifyDistributionCountOnly: true}},
- }
+// IntMetric is a shortcut factory for creating an Int64 metric.
+func IntMetric(name string, value int64, keyvalues ...string) Metric {
+ return genericMetricFactory(name, Value{Int64: &value}, keyvalues...)
}
-// WithResource sets the resource of the metric.
-func (m Metric) WithResource(r *resource.Resource) Metric {
- m.Resource = r
- return m
+// FloatMetric is a shortcut factor for creating a Float64 metric
+func FloatMetric(name string, value float64, keyvalues ...string) Metric {
+ return genericMetricFactory(name, Value{Float64: &value}, keyvalues...)
}
-// AssertMetric verifies that the metrics have the specified values. Note that
-// this method will spuriously fail if there are multiple metrics with the same
-// name on different Meters. Calls EnsureRecorded internally before fetching the
-// batch of metrics.
+// AssertMetric verifies that the metrics have the specified values.
func AssertMetric(t *testing.T, values ...Metric) {
t.Helper()
- EnsureRecorded()
for _, v := range values {
if diff := cmp.Diff(v, GetOneMetric(v.Name)); diff != "" {
- t.Errorf("Wrong metric (-want +got): %s", diff)
+ t.Errorf("Wrong adds (-want +got): %s", diff)
}
}
}
// AssertMetricExists verifies that at least one metric values has been reported for
// each of metric names.
-// Calls EnsureRecorded internally before fetching the batch of metrics.
func AssertMetricExists(t *testing.T, names ...string) {
- metrics := make([]Metric, 0, len(names))
- for _, n := range names {
- metrics = append(metrics, Metric{Name: n})
+ t.Helper()
+ for _, name := range names {
+ if len(GetMetric(name)) == 0 {
+ t.Errorf("No metrics found for %q", name)
+ }
}
- AssertMetric(t, metrics...)
}
// AssertNoMetric verifies that no metrics have been reported for any of the
// metric names.
-// Calls EnsureRecorded internally before fetching the batch of metrics.
func AssertNoMetric(t *testing.T, names ...string) {
t.Helper()
- EnsureRecorded()
for _, name := range names {
if m := GetMetric(name); len(m) != 0 {
t.Error("Found unexpected data for:", m)
@@ -311,9 +279,6 @@ func (v Value) Equal(other Value) bool {
if v.Distribution.Count != other.Distribution.Count {
return false
}
- if v.VerifyDistributionCountOnly || other.VerifyDistributionCountOnly {
- return true
- }
if v.Distribution.Sum != other.Distribution.Sum {
return false
}
diff --git a/vendor/knative.dev/pkg/metrics/opencensus_exporter.go b/vendor/knative.dev/pkg/metrics/opencensus_exporter.go
index d94b53e6980..651d3ce6644 100644
--- a/vendor/knative.dev/pkg/metrics/opencensus_exporter.go
+++ b/vendor/knative.dev/pkg/metrics/opencensus_exporter.go
@@ -52,15 +52,11 @@ func newOpenCensusExporter(config *metricsConfig, logger *zap.SugaredLogger) (vi
}
logger.Infow("Created OpenCensus exporter with config:", zap.Any("config", *config))
view.RegisterExporter(e)
- return e, getFactory(e, opts), nil
+ return e, getFactory(opts), nil
}
-func getFactory(defaultExporter view.Exporter, stored []ocagent.ExporterOption) ResourceExporterFactory {
+func getFactory(stored []ocagent.ExporterOption) ResourceExporterFactory {
return func(r *resource.Resource) (view.Exporter, error) {
- if r == nil || (r.Type == "" && len(r.Labels) == 0) {
- // Don't create duplicate exporters for the default exporter.
- return defaultExporter, nil
- }
opts := append(stored, ocagent.WithResourceDetector(
func(context.Context) (*resource.Resource, error) {
return r, nil
diff --git a/vendor/knative.dev/pkg/metrics/resource_view.go b/vendor/knative.dev/pkg/metrics/resource_view.go
index 7d69ae94ec1..ff42f9d671c 100644
--- a/vendor/knative.dev/pkg/metrics/resource_view.go
+++ b/vendor/knative.dev/pkg/metrics/resource_view.go
@@ -50,7 +50,10 @@ type ResourceExporterFactory func(*resource.Resource) (view.Exporter, error)
type meters struct {
meters map[string]*meterExporter
factory ResourceExporterFactory
- lock sync.Mutex
+ // Cache of Resource pointers from metricskey to Meters, to avoid
+ // unnecessary stringify operations
+ resourceToKey map[*resource.Resource]string
+ lock sync.Mutex
}
// Lock regime: lock allMeters before resourceViews. The critical path is in
@@ -58,7 +61,8 @@ type meters struct {
// resourceViews if a new meter needs to be created.
var resourceViews = storedViews{}
var allMeters = meters{
- meters: map[string]*meterExporter{"": &defaultMeter},
+ meters: map[string]*meterExporter{"": &defaultMeter},
+ resourceToKey: map[*resource.Resource]string{nil: ""},
}
// RegisterResourceView is similar to view.Register(), except that it will
@@ -155,19 +159,6 @@ func setFactory(f ResourceExporterFactory) error {
return retErr
}
-func setReportingPeriod(mc *metricsConfig) {
- allMeters.lock.Lock()
- defer allMeters.lock.Unlock()
-
- rp := time.Duration(0)
- if mc != nil {
- rp = mc.reportingPeriod
- }
- for _, meter := range allMeters.meters {
- meter.m.SetReportingPeriod(rp)
- }
-}
-
func flushResourceExporters() {
allMeters.lock.Lock()
defer allMeters.lock.Unlock()
@@ -177,23 +168,13 @@ func flushResourceExporters() {
}
}
-// ClearMetersForTest clears the internal set of metrics being exported,
-// including cleaning up background threads.
-func ClearMetersForTest() {
- allMeters.lock.Lock()
- defer allMeters.lock.Unlock()
-
- for k, meter := range allMeters.meters {
- if k == "" {
- continue
- }
- meter.m.Stop()
- delete(allMeters.meters, k)
+func meterExporterForResource(r *resource.Resource) *meterExporter {
+ key, ok := allMeters.resourceToKey[r]
+ if !ok {
+ key = resourceToKey(r)
+ allMeters.resourceToKey[r] = key
}
-}
-func meterExporterForResource(r *resource.Resource) *meterExporter {
- key := resourceToKey(r)
mE := allMeters.meters[key]
if mE == nil {
mE = &meterExporter{}
@@ -205,11 +186,6 @@ func meterExporterForResource(r *resource.Resource) *meterExporter {
mE.m = view.NewMeter()
mE.m.SetResource(r)
mE.m.Start()
-
- mc := getCurMetricsConfig()
- if mc != nil {
- mE.m.SetReportingPeriod(mc.reportingPeriod)
- }
resourceViews.lock.Lock()
defer resourceViews.lock.Unlock()
// make a copy of views to avoid data races
@@ -258,9 +234,6 @@ func optionForResource(r *resource.Resource) (stats.Options, error) {
}
func resourceToKey(r *resource.Resource) string {
- if r == nil {
- return ""
- }
var s strings.Builder
l := len(r.Type)
kvs := make([]string, 0, len(r.Labels))
diff --git a/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go b/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go
index befb95dbae5..44ae68b68d5 100644
--- a/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go
+++ b/vendor/knative.dev/pkg/metrics/stackdriver_exporter.go
@@ -21,7 +21,6 @@ import (
"fmt"
"path"
"sync"
- "time"
sd "contrib.go.opencensus.io/exporter/stackdriver"
"go.opencensus.io/resource"
@@ -50,8 +49,6 @@ const (
StackdriverSecretNameDefault = "stackdriver-service-account-key"
// secretDataFieldKey is the name of the k8s Secret field that contains the Secret's key.
secretDataFieldKey = "key.json"
- // stackdriverApiTimeout is the timeout value of Stackdriver API service side.
- stackdriverApiTimeout = 12 * time.Second
)
var (
@@ -189,7 +186,6 @@ func newStackdriverExporter(config *metricsConfig, logger *zap.SugaredLogger) (v
GetMetricPrefix: mpf,
ReportingInterval: config.reportingPeriod,
DefaultMonitoringLabels: &sd.Labels{},
- Timeout: stackdriverApiTimeout,
})
if err != nil {
logger.Errorw("Failed to create the Stackdriver exporter: ", zap.Error(err))
diff --git a/vendor/knative.dev/pkg/network/transports.go b/vendor/knative.dev/pkg/network/transports.go
index a692c5f5b31..c5027625906 100644
--- a/vendor/knative.dev/pkg/network/transports.go
+++ b/vendor/knative.dev/pkg/network/transports.go
@@ -92,7 +92,7 @@ func dialBackOffHelper(ctx context.Context, network, address string, bo wait.Bac
return nil, fmt.Errorf("timed out dialing after %.2fs", elapsed.Seconds())
}
-func newHTTPTransport(disableKeepAlives bool, maxIdle, maxIdlePerHost int) http.RoundTripper {
+func newHTTPTransport(disableKeepAlives bool) http.RoundTripper {
return &http.Transport{
// Those match net/http/transport.go
Proxy: http.ProxyFromEnvironment,
@@ -103,8 +103,8 @@ func newHTTPTransport(disableKeepAlives bool, maxIdle, maxIdlePerHost int) http.
// Those are bespoke.
DialContext: DialWithBackOff,
- MaxIdleConns: maxIdle,
- MaxIdleConnsPerHost: maxIdlePerHost,
+ MaxIdleConns: 1000,
+ MaxIdleConnsPerHost: 100,
}
}
@@ -112,17 +112,17 @@ func newHTTPTransport(disableKeepAlives bool, maxIdle, maxIdlePerHost int) http.
// since it will not cache connections.
func NewProberTransport() http.RoundTripper {
return newAutoTransport(
- newHTTPTransport(true /*disable keep-alives*/, 0, 0 /*no caching*/),
+ newHTTPTransport(true /*disable keep-alives*/),
NewH2CTransport())
}
// NewAutoTransport creates a RoundTripper that can use appropriate transport
// based on the request's HTTP version.
-func NewAutoTransport(maxIdle, maxIdlePerHost int) http.RoundTripper {
+func NewAutoTransport() http.RoundTripper {
return newAutoTransport(
- newHTTPTransport(false /*disable keep-alives*/, maxIdle, maxIdlePerHost),
+ newHTTPTransport(false /*disable keep-alives*/),
NewH2CTransport())
}
// AutoTransport uses h2c for HTTP2 requests and falls back to `http.DefaultTransport` for all others
-var AutoTransport = NewAutoTransport(1000, 100)
+var AutoTransport = NewAutoTransport()
diff --git a/vendor/knative.dev/pkg/profiling/server.go b/vendor/knative.dev/pkg/profiling/server.go
index e543489add6..fe27ac04e5f 100644
--- a/vendor/knative.dev/pkg/profiling/server.go
+++ b/vendor/knative.dev/pkg/profiling/server.go
@@ -21,8 +21,8 @@ import (
"net/http"
"net/http/pprof"
"strconv"
+ "sync/atomic"
- "go.uber.org/atomic"
"go.uber.org/zap"
corev1 "k8s.io/api/core/v1"
)
@@ -39,7 +39,7 @@ const (
// Handler holds the main HTTP handler and a flag indicating
// whether the handler is active
type Handler struct {
- enabled *atomic.Bool
+ enabled int32
handler http.Handler
log *zap.SugaredLogger
}
@@ -58,14 +58,14 @@ func NewHandler(logger *zap.SugaredLogger, enableProfiling bool) *Handler {
logger.Infof("Profiling enabled: %t", enableProfiling)
return &Handler{
- enabled: atomic.NewBool(enableProfiling),
+ enabled: boolToInt32(enableProfiling),
handler: mux,
log: logger,
}
}
func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- if h.enabled.Load() {
+ if atomic.LoadInt32(&h.enabled) == 1 {
h.handler.ServeHTTP(w, r)
} else {
http.NotFoundHandler().ServeHTTP(w, r)
@@ -93,7 +93,9 @@ func (h *Handler) UpdateFromConfigMap(configMap *corev1.ConfigMap) {
return
}
- if h.enabled.Swap(enabled) != enabled {
+ new := boolToInt32(enabled)
+ old := atomic.SwapInt32(&h.enabled, new)
+ if old != new {
h.log.Infof("Profiling enabled: %t", enabled)
}
}
@@ -105,3 +107,10 @@ func NewServer(handler http.Handler) *http.Server {
Handler: handler,
}
}
+
+func boolToInt32(b bool) int32 {
+ if b {
+ return 1
+ }
+ return 0
+}
diff --git a/vendor/knative.dev/pkg/ptr/ptr.go b/vendor/knative.dev/pkg/ptr/ptr.go
index 6d5eeaab69d..a3bfef85c6d 100644
--- a/vendor/knative.dev/pkg/ptr/ptr.go
+++ b/vendor/knative.dev/pkg/ptr/ptr.go
@@ -30,18 +30,6 @@ func Int64(i int64) *int64 {
return &i
}
-// Float32 is a helper for turning floats into pointers for use in
-// API types that want *float32.
-func Float32(f float32) *float32 {
- return &f
-}
-
-// Float64 is a helper for turning floats into pointers for use in
-// API types that want *float64.
-func Float64(f float64) *float64 {
- return &f
-}
-
// Bool is a helper for turning bools into pointers for use in
// API types that want *bool.
func Bool(b bool) *bool {
diff --git a/vendor/knative.dev/pkg/reconciler/reconcile_common.go b/vendor/knative.dev/pkg/reconciler/reconcile_common.go
index f7408fb4771..a807edfb688 100644
--- a/vendor/knative.dev/pkg/reconciler/reconcile_common.go
+++ b/vendor/knative.dev/pkg/reconciler/reconcile_common.go
@@ -30,13 +30,6 @@ import (
const failedGenerationBump = "NewObservedGenFailure"
-const (
- DoReconcileKind = "ReconcileKind"
- DoFinalizeKind = "FinalizeKind"
- DoObserveKind = "ObserveKind"
- DoObserveFinalizeKind = "ObserveFinalizeKind"
-)
-
// PreProcessReconcile contains logic to apply before reconciliation of a resource.
func PreProcessReconcile(ctx context.Context, resource duckv1.KRShaped) {
newStatus := resource.GetStatus()
diff --git a/vendor/knative.dev/pkg/reconciler/testing/table.go b/vendor/knative.dev/pkg/reconciler/testing/table.go
index 193861596a7..6059fefecce 100644
--- a/vendor/knative.dev/pkg/reconciler/testing/table.go
+++ b/vendor/knative.dev/pkg/reconciler/testing/table.go
@@ -102,15 +102,6 @@ type TableRow struct {
CmpOpts []cmp.Option
}
-var (
- ignoreLastTransitionTime = cmp.FilterPath(func(p cmp.Path) bool {
- return strings.HasSuffix(p.String(), "LastTransitionTime.Inner.Time")
- }, cmp.Ignore())
-
- ignoreQuantity = cmpopts.IgnoreUnexported(resource.Quantity{})
- defaultCmpOpts = []cmp.Option{ignoreLastTransitionTime, ignoreQuantity, cmpopts.EquateEmpty()}
-)
-
func objKey(o runtime.Object) string {
on := o.(kmeta.Accessor)
@@ -167,7 +158,6 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
t.Errorf("Error capturing actions by verb: %q", err)
}
- effectiveOpts := append(r.CmpOpts, defaultCmpOpts...)
// Previous state is used to diff resource expected state for update requests that were missed.
objPrevState := make(map[string]runtime.Object, len(r.Objects))
for _, o := range r.Objects {
@@ -187,9 +177,8 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
t.Errorf("Unexpected action[%d]: %#v", i, got)
}
- if !cmp.Equal(want, obj, effectiveOpts...) {
- t.Errorf("Unexpected create (-want, +got):\n%s",
- cmp.Diff(want, obj, effectiveOpts...))
+ if diff := cmp.Diff(want, obj, append(r.CmpOpts, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())...); diff != "" {
+ t.Errorf("Unexpected create (-want, +got): %s", diff)
}
}
if got, want := len(actions.Creates), len(r.WantCreates); got > want {
@@ -208,8 +197,8 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
t.Errorf("Object %s was never created: want: %#v", key, wo)
continue
}
- t.Errorf("Missing update for %s (-want, +prevState):\n%s", key,
- cmp.Diff(wo, oldObj, effectiveOpts...))
+ t.Errorf("Missing update for %s (-want, +prevState): %s", key,
+ cmp.Diff(wo, oldObj, append(r.CmpOpts, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())...))
continue
}
@@ -222,9 +211,8 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
// Update the object state.
objPrevState[objKey(got)] = got
- if !cmp.Equal(want.GetObject(), got, effectiveOpts...) {
- t.Errorf("Unexpected update (-want, +got):\n%s",
- cmp.Diff(want.GetObject(), got, effectiveOpts...))
+ if diff := cmp.Diff(want.GetObject(), got, append(r.CmpOpts, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())...); diff != "" {
+ t.Errorf("Unexpected update (-want, +got): %s", diff)
}
}
if got, want := len(updates), len(r.WantUpdates); got > want {
@@ -244,8 +232,8 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
t.Errorf("Object %s was never created: want: %#v", key, wo)
continue
}
- t.Errorf("Missing status update for %s (-want, +prevState):\n%s", key,
- cmp.Diff(wo, oldObj, effectiveOpts...))
+ t.Errorf("Missing status update for %s (-want, +prevState): %s", key,
+ cmp.Diff(wo, oldObj, append(r.CmpOpts, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())...))
continue
}
@@ -254,9 +242,8 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
// Update the object state.
objPrevState[objKey(got)] = got
- if !cmp.Equal(want.GetObject(), got, effectiveOpts...) {
- t.Errorf("Unexpected status update (-want, +got):\n%s\nFull: %v",
- cmp.Diff(want.GetObject(), got, effectiveOpts...), got)
+ if diff := cmp.Diff(want.GetObject(), got, append(r.CmpOpts, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())...); diff != "" {
+ t.Errorf("Unexpected status update (-want, +got): %s\nFull: %v", diff, got)
}
}
if got, want := len(statusUpdates), len(r.WantStatusUpdates); got > want {
@@ -268,8 +255,8 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
t.Errorf("Object %s was never created: want: %#v", key, wo)
continue
}
- t.Errorf("Extra status update for %s (-extra, +prevState):\n%s", key,
- cmp.Diff(wo, oldObj, effectiveOpts...))
+ t.Errorf("Extra status update for %s (-extra, +prevState): %s", key,
+ cmp.Diff(wo, oldObj, append(r.CmpOpts, ignoreLastTransitionTime, safeDeployDiff, cmpopts.EquateEmpty())...))
}
}
@@ -341,8 +328,8 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
got.GetName() != expectedNamespace) {
t.Errorf("Unexpected patch[%d]: %#v", i, got)
}
- if got, want := string(got.GetPatch()), string(want.GetPatch()); got != want {
- t.Errorf("Unexpected patch(-want, +got):\n%s", cmp.Diff(want, got))
+ if diff := cmp.Diff(string(want.GetPatch()), string(got.GetPatch())); diff != "" {
+ t.Errorf("Unexpected patch(-want, +got): %s", diff)
}
}
if got, want := len(actions.Patches), len(r.WantPatches); got > want {
@@ -354,17 +341,17 @@ func (r *TableRow) Test(t *testing.T, factory Factory) {
gotEvents := eventList.Events()
for i, want := range r.WantEvents {
if i >= len(gotEvents) {
- t.Error("Missing event:", want)
+ t.Errorf("Missing event: %s", want)
continue
}
- if !cmp.Equal(want, gotEvents[i]) {
- t.Errorf("Unexpected event(-want, +got):\n%s", cmp.Diff(want, gotEvents[i]))
+ if diff := cmp.Diff(want, gotEvents[i]); diff != "" {
+ t.Errorf("unexpected event(-want, +got): %s", diff)
}
}
if got, want := len(gotEvents), len(r.WantEvents); got > want {
for _, extra := range gotEvents[want:] {
- t.Error("Extra event:", extra)
+ t.Errorf("Extra event: %s", extra)
}
}
@@ -392,18 +379,25 @@ func (tt TableTest) Test(t *testing.T, factory Factory) {
t.Helper()
for _, test := range tt {
// Record the original objects in table.
- originObjects := make([]runtime.Object, len(test.Objects))
- for i, obj := range test.Objects {
- originObjects[i] = obj.DeepCopyObject()
+ originObjects := make([]runtime.Object, 0, len(test.Objects))
+ for _, obj := range test.Objects {
+ originObjects = append(originObjects, obj.DeepCopyObject())
}
t.Run(test.Name, func(t *testing.T) {
t.Helper()
test.Test(t, factory)
- // Validate cached objects do not get soiled after controller loops.
- if !cmp.Equal(originObjects, test.Objects, defaultCmpOpts...) {
- t.Errorf("Unexpected objects (-want, +got):\n%s",
- cmp.Diff(originObjects, test.Objects, defaultCmpOpts...))
- }
})
+ // Validate cached objects do not get soiled after controller loops
+ if diff := cmp.Diff(originObjects, test.Objects, safeDeployDiff, cmpopts.EquateEmpty()); diff != "" {
+ t.Errorf("Unexpected objects in test %s (-want, +got): %v", test.Name, diff)
+ }
}
}
+
+var (
+ ignoreLastTransitionTime = cmp.FilterPath(func(p cmp.Path) bool {
+ return strings.HasSuffix(p.String(), "LastTransitionTime.Inner.Time")
+ }, cmp.Ignore())
+
+ safeDeployDiff = cmpopts.IgnoreUnexported(resource.Quantity{})
+)
diff --git a/vendor/knative.dev/pkg/test/clients.go b/vendor/knative.dev/pkg/test/clients.go
index 374ef5acaaf..6c2f0068fe7 100644
--- a/vendor/knative.dev/pkg/test/clients.go
+++ b/vendor/knative.dev/pkg/test/clients.go
@@ -102,9 +102,7 @@ func (client *KubeClient) PodLogs(podName, containerName, namespace string) ([]b
if err != nil {
return nil, err
}
- for i := range podList.Items {
- // Pods are big, so avoid copying.
- pod := &podList.Items[i]
+ for _, pod := range podList.Items {
if strings.Contains(pod.Name, podName) {
result := pods.GetLogs(pod.Name, &corev1.PodLogOptions{
Container: containerName,
@@ -112,5 +110,5 @@ func (client *KubeClient) PodLogs(podName, containerName, namespace string) ([]b
return result.Raw()
}
}
- return nil, fmt.Errorf("could not find logs for %s/%s:%s", namespace, podName, containerName)
+ return nil, fmt.Errorf("could not find logs for %s/%s", podName, containerName)
}
diff --git a/vendor/knative.dev/pkg/test/kube_checks.go b/vendor/knative.dev/pkg/test/kube_checks.go
index 897dac0863a..61a259ac96e 100644
--- a/vendor/knative.dev/pkg/test/kube_checks.go
+++ b/vendor/knative.dev/pkg/test/kube_checks.go
@@ -185,7 +185,7 @@ func WaitForLogContent(client *KubeClient, podName, containerName, namespace, co
// WaitForAllPodsRunning waits for all the pods to be in running state
func WaitForAllPodsRunning(client *KubeClient, namespace string) error {
- return WaitForPodListState(client, podsRunning, "PodsAreRunning", namespace)
+ return WaitForPodListState(client, PodsRunning, "PodsAreRunning", namespace)
}
// WaitForPodRunning waits for the given pod to be in running state
@@ -196,23 +196,22 @@ func WaitForPodRunning(client *KubeClient, name string, namespace string) error
if err != nil {
return true, err
}
- return podRunning(p), nil
+ return PodRunning(p), nil
})
}
-// podsRunning will check the status conditions of the pod list and return true all pods are Running
-func podsRunning(podList *corev1.PodList) (bool, error) {
- // Pods are big, so use indexing, to avoid copying.
- for i := range podList.Items {
- if isRunning := podRunning(&podList.Items[i]); !isRunning {
+// PodsRunning will check the status conditions of the pod list and return true all pods are Running
+func PodsRunning(podList *corev1.PodList) (bool, error) {
+ for _, pod := range podList.Items {
+ if isRunning := PodRunning(&pod); !isRunning {
return false, nil
}
}
return true, nil
}
-// podRunning will check the status conditions of the pod and return true if it's Running.
-func podRunning(pod *corev1.Pod) bool {
+// PodRunning will check the status conditions of the pod and return true if it's Running
+func PodRunning(pod *corev1.Pod) bool {
return pod.Status.Phase == corev1.PodRunning || pod.Status.Phase == corev1.PodSucceeded
}
diff --git a/vendor/knative.dev/pkg/test/logging/logger_writer.go b/vendor/knative.dev/pkg/test/logging/logger_writer.go
new file mode 100644
index 00000000000..ccec48ac371
--- /dev/null
+++ b/vendor/knative.dev/pkg/test/logging/logger_writer.go
@@ -0,0 +1,57 @@
+/*
+Copyright 2020 The Knative Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+package logging
+
+import (
+ "bytes"
+ "io"
+ "strings"
+ "sync"
+)
+
+type loggerWriter struct {
+ prepend string
+ logf FormatLogger
+
+ buf bytes.Buffer
+ lock sync.Mutex
+}
+
+func NewLoggerWriter(prepend string, logf FormatLogger) io.Writer {
+ return &loggerWriter{
+ prepend: prepend,
+ logf: logf,
+ }
+}
+
+func (l *loggerWriter) Write(p []byte) (n int, err error) {
+ l.lock.Lock()
+ defer l.lock.Unlock()
+ s := string(p)
+ splitted := strings.Split(s, "\n")
+
+ // Get the remaining of the previous write
+ splitted[0] = l.buf.String() + splitted[0]
+ l.buf.Reset()
+
+ // Write out the lines
+ for i := 0; i < len(splitted)-1; i++ {
+ l.logf(l.prepend + splitted[i])
+ }
+
+ n, err = l.buf.WriteString(splitted[len(splitted)-1])
+ return
+}
diff --git a/vendor/knative.dev/pkg/test/logging/tlogger.go b/vendor/knative.dev/pkg/test/logging/tlogger.go
index e89e2294a83..ea6f3f0139b 100644
--- a/vendor/knative.dev/pkg/test/logging/tlogger.go
+++ b/vendor/knative.dev/pkg/test/logging/tlogger.go
@@ -181,11 +181,6 @@ func (o *TLogger) errorWithRuntimeCheck(stringThenKeysAndValues ...interface{})
}
}
-// Cleanup registers a cleanup callback.
-func (o *TLogger) Cleanup(c func()) {
- o.t.Cleanup(c)
-}
-
// Run a subtest. Just like testing.T.Run but creates a TLogger.
func (o *TLogger) Run(name string, f func(t *TLogger)) {
tfunc := func(ts *testing.T) {
diff --git a/vendor/knative.dev/pkg/test/monitoring/monitoring.go b/vendor/knative.dev/pkg/test/monitoring/monitoring.go
index 2dbaf958c30..1611250ccff 100644
--- a/vendor/knative.dev/pkg/test/monitoring/monitoring.go
+++ b/vendor/knative.dev/pkg/test/monitoring/monitoring.go
@@ -18,14 +18,19 @@ package monitoring
import (
"fmt"
+ "log"
"net"
- "os"
- "os/exec"
+ "net/http"
"strings"
+ "k8s.io/client-go/rest"
+ "k8s.io/client-go/tools/portforward"
+ "k8s.io/client-go/transport/spdy"
+
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
+
"knative.dev/pkg/test/logging"
)
@@ -52,34 +57,41 @@ func GetPods(kubeClientset *kubernetes.Clientset, app, namespace string) (*v1.Po
return pods, err
}
-// Cleanup will clean the background process used for port forwarding
-func Cleanup(pid int) error {
- ps := os.Process{Pid: pid}
- return ps.Kill()
-}
-
// PortForward sets up local port forward to the pod specified by the "app" label in the given namespace
-func PortForward(logf logging.FormatLogger, podList *v1.PodList, localPort, remotePort int, namespace string) (int, error) {
- podName := podList.Items[0].Name
- portFwdCmd := fmt.Sprintf("kubectl port-forward %s %d:%d -n %s", podName, localPort, remotePort, namespace)
- portFwdProcess, err := executeCmdBackground(logf, portFwdCmd)
+// To close the port forwarding, just close the channel
+func PortForward(logf logging.FormatLogger, config *rest.Config, clientSet *kubernetes.Clientset, pod *v1.Pod, localPort, remotePort int) (chan struct{}, error) {
+ req := clientSet.RESTClient().Post().Resource("pods").Namespace(pod.Namespace).Name(pod.Name).SubResource("portforward")
+ portForwardUrl := req.URL()
- if err != nil {
- return 0, fmt.Errorf("failed to port forward: %w", err)
+ // req could be generated without the /api/v1 prefix
+ if !strings.HasPrefix(portForwardUrl.Path, "/api/v1") {
+ portForwardUrl.Path = "/api/v1" + portForwardUrl.Path
}
- logf("running %s port-forward in background, pid = %d", podName, portFwdProcess.Pid)
- return portFwdProcess.Pid, nil
-}
+ stopChan := make(chan struct{})
+ readyChan := make(chan struct{})
-// RunBackground starts a background process and returns the Process if succeed
-func executeCmdBackground(logf logging.FormatLogger, format string, args ...interface{}) (*os.Process, error) {
- cmd := fmt.Sprintf(format, args...)
- logf("Executing command: %s", cmd)
- parts := strings.Split(cmd, " ")
- c := exec.Command(parts[0], parts[1:]...) // #nosec
- if err := c.Start(); err != nil {
- return nil, fmt.Errorf("%s command failed: %w", cmd, err)
+ transport, upgrader, err := spdy.RoundTripperFor(config)
+ dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, http.MethodPost, portForwardUrl)
+ fw, err := portforward.New(
+ dialer,
+ []string{fmt.Sprintf("%d:%d", localPort, remotePort)},
+ stopChan,
+ readyChan,
+ logging.NewLoggerWriter("port-forward-out-"+pod.Name+": ", logf),
+ logging.NewLoggerWriter("port-forward-err-"+pod.Name+": ", logf),
+ )
+ if err != nil {
+ return nil, err
}
- return c.Process, nil
+ go func() {
+ err := fw.ForwardPorts()
+ if err != nil {
+ log.Fatalf("Error opening the port forward for pod %s in ns %s with ports %d:%d, cause: %v", pod.Name, pod.Namespace, localPort, remotePort, err)
+ }
+ }()
+
+ <-readyChan
+ logf("Started port forwarding for pod %s in ns %s with ports %d:%d", pod.Name, pod.Namespace, localPort, remotePort)
+ return stopChan, nil
}
diff --git a/vendor/knative.dev/pkg/test/tinterface.go b/vendor/knative.dev/pkg/test/tinterface.go
index 3b2d72bb6ff..c4a87dae864 100644
--- a/vendor/knative.dev/pkg/test/tinterface.go
+++ b/vendor/knative.dev/pkg/test/tinterface.go
@@ -24,7 +24,6 @@ type T interface {
Name() string
Helper()
SkipNow()
- Cleanup(func())
Log(args ...interface{})
Error(args ...interface{})
}
diff --git a/vendor/knative.dev/pkg/test/zipkin/util.go b/vendor/knative.dev/pkg/test/zipkin/util.go
index b5b57491139..66b28670736 100644
--- a/vendor/knative.dev/pkg/test/zipkin/util.go
+++ b/vendor/knative.dev/pkg/test/zipkin/util.go
@@ -30,12 +30,15 @@ import (
"testing"
"time"
+ "k8s.io/client-go/rest"
+
tracingconfig "knative.dev/pkg/tracing/config"
"github.com/openzipkin/zipkin-go/model"
"go.opencensus.io/trace"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
+
"knative.dev/pkg/test/logging"
"knative.dev/pkg/test/monitoring"
)
@@ -57,7 +60,7 @@ const (
)
var (
- zipkinPortForwardPID int
+ zipkinPortForwardStopCh chan struct{}
// ZipkinTracingEnabled variable indicating if zipkin tracing is enabled.
ZipkinTracingEnabled = false
@@ -71,7 +74,7 @@ var (
// SetupZipkinTracingFromConfigTracing setups zipkin tracing like SetupZipkinTracing but retrieving the zipkin configuration
// from config-tracing config map
-func SetupZipkinTracingFromConfigTracing(kubeClientset *kubernetes.Clientset, logf logging.FormatLogger, configMapNamespace string) error {
+func SetupZipkinTracingFromConfigTracing(kubeconfig *rest.Config, kubeClientset *kubernetes.Clientset, logf logging.FormatLogger, configMapNamespace string) error {
cm, err := kubeClientset.CoreV1().ConfigMaps(configMapNamespace).Get("config-tracing", metav1.GetOptions{})
if err != nil {
return fmt.Errorf("error while retrieving config-tracing config map: %w", err)
@@ -98,12 +101,12 @@ func SetupZipkinTracingFromConfigTracing(kubeClientset *kubernetes.Clientset, lo
return fmt.Errorf("error while parsing the Zipkin endpoint in config-tracing config map: %w", err)
}
- return SetupZipkinTracing(kubeClientset, logf, int(port), namespace)
+ return SetupZipkinTracing(kubeconfig, kubeClientset, logf, int(port), namespace)
}
// SetupZipkinTracingFromConfigTracingOrFail is same as SetupZipkinTracingFromConfigTracing, but fails the test if an error happens
-func SetupZipkinTracingFromConfigTracingOrFail(t testing.TB, kubeClientset *kubernetes.Clientset, configMapNamespace string) {
- if err := SetupZipkinTracingFromConfigTracing(kubeClientset, t.Logf, configMapNamespace); err != nil {
+func SetupZipkinTracingFromConfigTracingOrFail(t testing.TB, kubeconfig *rest.Config, kubeClientset *kubernetes.Clientset, configMapNamespace string) {
+ if err := SetupZipkinTracingFromConfigTracing(kubeconfig, kubeClientset, t.Logf, configMapNamespace); err != nil {
t.Fatalf("Error while setup Zipkin tracing: %v", err)
}
}
@@ -113,7 +116,7 @@ func SetupZipkinTracingFromConfigTracingOrFail(t testing.TB, kubeClientset *kube
// (pid of the process doing Port-Forward is stored in a global variable).
// 2. Enable AlwaysSample config for tracing for the SpoofingClient.
// The zipkin deployment must have the label app=zipkin
-func SetupZipkinTracing(kubeClientset *kubernetes.Clientset, logf logging.FormatLogger, zipkinRemotePort int, zipkinNamespace string) (err error) {
+func SetupZipkinTracing(kubeconfig *rest.Config, kubeClientset *kubernetes.Clientset, logf logging.FormatLogger, zipkinRemotePort int, zipkinNamespace string) (err error) {
setupOnce.Do(func() {
if e := monitoring.CheckPortAvailability(zipkinRemotePort); e != nil {
err = fmt.Errorf("Zipkin port not available on the machine: %w", err)
@@ -126,13 +129,13 @@ func SetupZipkinTracing(kubeClientset *kubernetes.Clientset, logf logging.Format
return
}
- zipkinPortForwardPID, e = monitoring.PortForward(logf, zipkinPods, ZipkinPort, zipkinRemotePort, zipkinNamespace)
+ zipkinPortForwardStopCh, e = monitoring.PortForward(logf, kubeconfig, kubeClientset, &zipkinPods.Items[0], ZipkinPort, zipkinRemotePort)
if e != nil {
err = fmt.Errorf("error starting kubectl port-forward command: %w", err)
return
}
- logf("Zipkin port-forward process started with PID: %d", zipkinPortForwardPID)
+ logf("Zipkin port-forward started")
// Applying AlwaysSample config to ensure we propagate zipkin header for every request made by this client.
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()})
@@ -142,8 +145,8 @@ func SetupZipkinTracing(kubeClientset *kubernetes.Clientset, logf logging.Format
}
// SetupZipkinTracingOrFail is same as SetupZipkinTracing, but fails the test if an error happens
-func SetupZipkinTracingOrFail(t testing.TB, kubeClientset *kubernetes.Clientset, zipkinRemotePort int, zipkinNamespace string) {
- if err := SetupZipkinTracing(kubeClientset, t.Logf, zipkinRemotePort, zipkinNamespace); err != nil {
+func SetupZipkinTracingOrFail(t testing.TB, kubeconfig *rest.Config, kubeClientset *kubernetes.Clientset, zipkinRemotePort int, zipkinNamespace string) {
+ if err := SetupZipkinTracing(kubeconfig, kubeClientset, t.Logf, zipkinRemotePort, zipkinNamespace); err != nil {
t.Fatalf("Error while setup zipkin tracing: %v", err)
}
}
@@ -168,9 +171,9 @@ func CleanupZipkinTracingSetup(logf logging.FormatLogger) {
return
}
- if err := monitoring.Cleanup(zipkinPortForwardPID); err != nil {
- logf("Encountered error killing port-forward process in CleanupZipkinTracingSetup() : %v", err)
- return
+ if zipkinPortForwardStopCh != nil {
+ close(zipkinPortForwardStopCh)
+ logf("Stopped zipkin port-forward")
}
ZipkinTracingEnabled = false
diff --git a/vendor/knative.dev/pkg/tracing/opencensus.go b/vendor/knative.dev/pkg/tracing/opencensus.go
index 48ca0087586..be29c223715 100644
--- a/vendor/knative.dev/pkg/tracing/opencensus.go
+++ b/vendor/knative.dev/pkg/tracing/opencensus.go
@@ -114,16 +114,6 @@ func createOCTConfig(cfg *config.Config) *trace.Config {
// WithExporter returns a ConfigOption for use with NewOpenCensusTracer that configures
// it to export traces based on the configuration read from config-tracing.
func WithExporter(name string, logger *zap.SugaredLogger) ConfigOption {
- return WithExporterFull(name, name, logger)
-}
-
-// WithExporterFull supports host argument for WithExporter.
-// The host arg is used for a value of tag ip="{IP}" so you can use an actual IP. Otherwise,
-// the host name must be able to be resolved.
-// e.g)
-// "name" is a service name like activator-service.
-// "host" is a endpoint IP like activator-service's endpint IP.
-func WithExporterFull(name, host string, logger *zap.SugaredLogger) ConfigOption {
return func(cfg *config.Config) error {
var (
exporter trace.Exporter
@@ -140,20 +130,18 @@ func WithExporterFull(name, host string, logger *zap.SugaredLogger) ConfigOption
}
exporter = exp
case config.Zipkin:
- // If host isn't specified, then zipkin.NewEndpoint will return an error saying that it
+ // If name isn't specified, then zipkin.NewEndpoint will return an error saying that it
// can't find the host named ''. So, if not specified, default it to this machine's
// hostname.
- if host == "" {
+ if name == "" {
n, err := os.Hostname()
if err != nil {
return fmt.Errorf("unable to get hostname: %w", err)
}
- host = n
- }
- if name == "" {
- name = host
+ name = n
}
- zipEP, err := zipkin.NewEndpoint(name, host)
+ hostPort := name + ":80"
+ zipEP, err := zipkin.NewEndpoint(name, hostPort)
if err != nil {
logger.Errorw("error building zipkin endpoint", zap.Error(err))
return err
diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go
index faebc5f6e9a..80d1dcc9adf 100644
--- a/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go
+++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/defaulting/defaulting.go
@@ -186,11 +186,6 @@ func (ac *reconciler) reconcileMutatingWebhook(ctx context.Context, caCert []byt
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: "webhooks.knative.dev/exclude",
Operator: metav1.LabelSelectorOpDoesNotExist,
- }, {
- // "control-plane" is added to support Azure's AKS, otherwise the controllers fight.
- // See knative/pkg#1590 for details.
- Key: "control-plane",
- Operator: metav1.LabelSelectorOpDoesNotExist,
}},
}
webhook.Webhooks[i].ClientConfig.CABundle = caCert
diff --git a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go
index e598ec5fd18..1537adc7892 100644
--- a/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go
+++ b/vendor/knative.dev/pkg/webhook/resourcesemantics/validation/reconcile_config.go
@@ -149,11 +149,6 @@ func (ac *reconciler) reconcileValidatingWebhook(ctx context.Context, caCert []b
MatchExpressions: []metav1.LabelSelectorRequirement{{
Key: "webhooks.knative.dev/exclude",
Operator: metav1.LabelSelectorOpDoesNotExist,
- }, {
- // "control-plane" is added to support Azure's AKS, otherwise the controllers fight.
- // See knative/pkg#1590 for details.
- Key: "control-plane",
- Operator: metav1.LabelSelectorOpDoesNotExist,
}},
}
webhook.Webhooks[i].ClientConfig.CABundle = caCert
diff --git a/vendor/modules.txt b/vendor/modules.txt
index faa810db455..2c52b4f3c0c 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -22,8 +22,6 @@ contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/aws
contrib.go.opencensus.io/exporter/stackdriver/monitoredresource/gcp
# contrib.go.opencensus.io/exporter/zipkin v0.1.1
contrib.go.opencensus.io/exporter/zipkin
-# github.com/BurntSushi/toml v0.3.1
-github.com/BurntSushi/toml
# github.com/PuerkitoBio/purell v1.1.1
github.com/PuerkitoBio/purell
# github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578
@@ -104,6 +102,9 @@ github.com/cloudevents/sdk-go/v2/test
github.com/cloudevents/sdk-go/v2/types
# github.com/davecgh/go-spew v1.1.1
github.com/davecgh/go-spew/spew
+# github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96
+github.com/docker/spdystream
+github.com/docker/spdystream/spdy
# github.com/emicklei/go-restful v2.9.5+incompatible
github.com/emicklei/go-restful
github.com/emicklei/go-restful/log
@@ -328,11 +329,6 @@ go.opentelemetry.io/otel/api/trace
# go.uber.org/atomic v1.6.0
## explicit
go.uber.org/atomic
-# go.uber.org/automaxprocs v1.3.0
-go.uber.org/automaxprocs
-go.uber.org/automaxprocs/internal/cgroups
-go.uber.org/automaxprocs/internal/runtime
-go.uber.org/automaxprocs/maxprocs
# go.uber.org/multierr v1.5.0
go.uber.org/multierr
# go.uber.org/zap v1.15.0
@@ -396,21 +392,10 @@ golang.org/x/text/width
golang.org/x/time/rate
# golang.org/x/tools v0.0.0-20200731060945-b5fad4ed8dd6
golang.org/x/tools/cmd/goimports
-golang.org/x/tools/go/analysis
-golang.org/x/tools/go/analysis/passes/inspect
golang.org/x/tools/go/ast/astutil
-golang.org/x/tools/go/ast/inspector
-golang.org/x/tools/go/buildutil
golang.org/x/tools/go/gcexportdata
-golang.org/x/tools/go/internal/cgo
golang.org/x/tools/go/internal/gcimporter
-golang.org/x/tools/go/internal/packagesdriver
-golang.org/x/tools/go/loader
-golang.org/x/tools/go/packages
-golang.org/x/tools/go/types/objectpath
-golang.org/x/tools/go/types/typeutil
golang.org/x/tools/imports
-golang.org/x/tools/internal/analysisinternal
golang.org/x/tools/internal/event
golang.org/x/tools/internal/event/core
golang.org/x/tools/internal/event/keys
@@ -419,8 +404,6 @@ golang.org/x/tools/internal/fastwalk
golang.org/x/tools/internal/gocommand
golang.org/x/tools/internal/gopathwalk
golang.org/x/tools/internal/imports
-golang.org/x/tools/internal/packagesinternal
-golang.org/x/tools/internal/typesinternal
# golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543
golang.org/x/xerrors
golang.org/x/xerrors/internal
@@ -602,36 +585,6 @@ gopkg.in/yaml.v2
# gopkg.in/yaml.v3 v3.0.0-20191026110619-0b21df46bc1d
## explicit
gopkg.in/yaml.v3
-# honnef.co/go/tools v0.0.1-2020.1.4
-honnef.co/go/tools/arg
-honnef.co/go/tools/cmd/staticcheck
-honnef.co/go/tools/code
-honnef.co/go/tools/config
-honnef.co/go/tools/deprecated
-honnef.co/go/tools/edit
-honnef.co/go/tools/facts
-honnef.co/go/tools/functions
-honnef.co/go/tools/go/types/typeutil
-honnef.co/go/tools/internal/cache
-honnef.co/go/tools/internal/passes/buildir
-honnef.co/go/tools/internal/renameio
-honnef.co/go/tools/internal/robustio
-honnef.co/go/tools/internal/sharedcheck
-honnef.co/go/tools/ir
-honnef.co/go/tools/ir/irutil
-honnef.co/go/tools/lint
-honnef.co/go/tools/lint/lintdsl
-honnef.co/go/tools/lint/lintutil
-honnef.co/go/tools/lint/lintutil/format
-honnef.co/go/tools/loader
-honnef.co/go/tools/pattern
-honnef.co/go/tools/printf
-honnef.co/go/tools/report
-honnef.co/go/tools/simple
-honnef.co/go/tools/staticcheck
-honnef.co/go/tools/stylecheck
-honnef.co/go/tools/unused
-honnef.co/go/tools/version
# k8s.io/api v0.18.7-rc.0 => k8s.io/api v0.17.6
## explicit
k8s.io/api/admission/v1
@@ -729,6 +682,8 @@ k8s.io/apimachinery/pkg/util/clock
k8s.io/apimachinery/pkg/util/diff
k8s.io/apimachinery/pkg/util/errors
k8s.io/apimachinery/pkg/util/framer
+k8s.io/apimachinery/pkg/util/httpstream
+k8s.io/apimachinery/pkg/util/httpstream/spdy
k8s.io/apimachinery/pkg/util/intstr
k8s.io/apimachinery/pkg/util/json
k8s.io/apimachinery/pkg/util/mergepatch
@@ -746,6 +701,7 @@ k8s.io/apimachinery/pkg/util/yaml
k8s.io/apimachinery/pkg/version
k8s.io/apimachinery/pkg/watch
k8s.io/apimachinery/third_party/forked/golang/json
+k8s.io/apimachinery/third_party/forked/golang/netutil
k8s.io/apimachinery/third_party/forked/golang/reflect
# k8s.io/apiserver v0.17.6
## explicit
@@ -953,10 +909,12 @@ k8s.io/client-go/tools/leaderelection
k8s.io/client-go/tools/leaderelection/resourcelock
k8s.io/client-go/tools/metrics
k8s.io/client-go/tools/pager
+k8s.io/client-go/tools/portforward
k8s.io/client-go/tools/record
k8s.io/client-go/tools/record/util
k8s.io/client-go/tools/reference
k8s.io/client-go/transport
+k8s.io/client-go/transport/spdy
k8s.io/client-go/util/cert
k8s.io/client-go/util/connrotation
k8s.io/client-go/util/flowcontrol
@@ -1027,7 +985,7 @@ k8s.io/utils/buffer
k8s.io/utils/integer
k8s.io/utils/pointer
k8s.io/utils/trace
-# knative.dev/pkg v0.0.0-20200811233205-6b7eccba3b9c
+# knative.dev/pkg v0.0.0-20200811233205-6b7eccba3b9c => github.com/slinkydeveloper/pkg v0.0.0-20200724072217-fc14798189e0
## explicit
knative.dev/pkg/apiextensions/storageversion
knative.dev/pkg/apiextensions/storageversion/cmd/migrate
@@ -1164,3 +1122,4 @@ sigs.k8s.io/yaml
# k8s.io/apimachinery => k8s.io/apimachinery v0.17.6
# k8s.io/client-go => k8s.io/client-go v0.17.6
# k8s.io/code-generator => k8s.io/code-generator v0.17.6
+# knative.dev/pkg => github.com/slinkydeveloper/pkg v0.0.0-20200724072217-fc14798189e0