mirror of
				https://github.com/optim-enterprises-bv/kubernetes.git
				synced 2025-10-30 17:58:14 +00:00 
			
		
		
		
	Vendor staticcheck
This commit is contained in:
		
							
								
								
									
										57
									
								
								Godeps/LICENSES
									
									
									
										generated
									
									
									
								
							
							
						
						
									
										57
									
								
								Godeps/LICENSES
									
									
									
										generated
									
									
									
								
							| @@ -2632,6 +2632,35 @@ THE SOFTWARE. | ||||
| ================================================================================ | ||||
| 
 | ||||
| 
 | ||||
| ================================================================================ | ||||
| = vendor/github.com/BurntSushi/toml licensed under: = | ||||
| 
 | ||||
| The MIT License (MIT) | ||||
| 
 | ||||
| Copyright (c) 2013 TOML authors | ||||
| 
 | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
| of this software and associated documentation files (the "Software"), to deal | ||||
| in the Software without restriction, including without limitation the rights | ||||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
| copies of the Software, and to permit persons to whom the Software is | ||||
| furnished to do so, subject to the following conditions: | ||||
| 
 | ||||
| The above copyright notice and this permission notice shall be included in | ||||
| all copies or substantial portions of the Software. | ||||
| 
 | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||||
| THE SOFTWARE. | ||||
| 
 | ||||
| = vendor/github.com/BurntSushi/toml/COPYING 9e24c0e2a784c1d1fcabb279f4f107e0 | ||||
| ================================================================================ | ||||
| 
 | ||||
| 
 | ||||
| ================================================================================ | ||||
| = vendor/github.com/caddyserver/caddy licensed under: = | ||||
| 
 | ||||
| @@ -21240,6 +21269,34 @@ limitations under the License. | ||||
| ================================================================================ | ||||
| 
 | ||||
| 
 | ||||
| ================================================================================ | ||||
| = vendor/honnef.co/go/tools licensed under: = | ||||
| 
 | ||||
| Copyright (c) 2016 Dominik Honnef | ||||
| 
 | ||||
| Permission is hereby granted, free of charge, to any person obtaining | ||||
| a copy of this software and associated documentation files (the | ||||
| "Software"), to deal in the Software without restriction, including | ||||
| without limitation the rights to use, copy, modify, merge, publish, | ||||
| distribute, sublicense, and/or sell copies of the Software, and to | ||||
| permit persons to whom the Software is furnished to do so, subject to | ||||
| the following conditions: | ||||
| 
 | ||||
| The above copyright notice and this permission notice shall be | ||||
| included in all copies or substantial portions of the Software. | ||||
| 
 | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
| EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
| MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
| NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | ||||
| LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||||
| OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||||
| WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||
| 
 | ||||
| = vendor/honnef.co/go/tools/LICENSE ca0492e2abd2c12b3f3c25d6e33972ad | ||||
| ================================================================================ | ||||
| 
 | ||||
| 
 | ||||
| ================================================================================ | ||||
| = vendor/k8s.io/gengo licensed under: = | ||||
| 
 | ||||
|   | ||||
| @@ -29,6 +29,7 @@ import ( | ||||
| 	_ "golang.org/x/lint/golint" | ||||
| 	_ "gotest.tools" | ||||
| 	_ "gotest.tools/gotestsum" | ||||
| 	_ "honnef.co/go/tools/cmd/staticcheck" | ||||
| 	_ "k8s.io/code-generator/cmd/go-to-protobuf" | ||||
| 	_ "k8s.io/code-generator/cmd/go-to-protobuf/protoc-gen-gogo" | ||||
| 	_ "k8s.io/gengo/examples/deepcopy-gen/generators" | ||||
|   | ||||
							
								
								
									
										8
									
								
								go.mod
									
									
									
									
									
								
							
							
						
						
									
										8
									
								
								go.mod
									
									
									
									
									
								
							| @@ -134,7 +134,7 @@ require ( | ||||
| 	golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a | ||||
| 	golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f | ||||
| 	golang.org/x/time v0.0.0-20161028155119-f51c12702a4d | ||||
| 	golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59 | ||||
| 	golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac | ||||
| 	gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 | ||||
| 	google.golang.org/api v0.0.0-20181220000619-583d854617af | ||||
| 	google.golang.org/grpc v1.13.0 | ||||
| @@ -143,6 +143,7 @@ require ( | ||||
| 	gopkg.in/yaml.v2 v2.2.2 | ||||
| 	gotest.tools v2.2.0+incompatible | ||||
| 	gotest.tools/gotestsum v0.3.5 | ||||
| 	honnef.co/go/tools v0.0.1-2019.2.2 | ||||
| 	k8s.io/api v0.0.0 | ||||
| 	k8s.io/apiextensions-apiserver v0.0.0 | ||||
| 	k8s.io/apimachinery v0.0.0 | ||||
| @@ -285,6 +286,7 @@ replace ( | ||||
| 	github.com/google/certificate-transparency-go => github.com/google/certificate-transparency-go v1.0.21 | ||||
| 	github.com/google/go-cmp => github.com/google/go-cmp v0.3.0 | ||||
| 	github.com/google/gofuzz => github.com/google/gofuzz v1.0.0 | ||||
| 	github.com/google/renameio => github.com/google/renameio v0.1.0 | ||||
| 	github.com/google/uuid => github.com/google/uuid v1.1.1 | ||||
| 	github.com/googleapis/gnostic => github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d | ||||
| 	github.com/gophercloud/gophercloud => github.com/gophercloud/gophercloud v0.1.0 | ||||
| @@ -374,6 +376,7 @@ replace ( | ||||
| 	github.com/quobyte/api => github.com/quobyte/api v0.1.2 | ||||
| 	github.com/remyoudompheng/bigfft => github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446 | ||||
| 	github.com/robfig/cron => github.com/robfig/cron v1.1.0 | ||||
| 	github.com/rogpeppe/go-internal => github.com/rogpeppe/go-internal v1.3.0 | ||||
| 	github.com/rubiojr/go-vhd => github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c | ||||
| 	github.com/russross/blackfriday => github.com/russross/blackfriday v1.5.2 | ||||
| 	github.com/satori/go.uuid => github.com/satori/go.uuid v1.2.0 | ||||
| @@ -410,6 +413,7 @@ replace ( | ||||
| 	golang.org/x/image => golang.org/x/image v0.0.0-20190227222117-0694c2d4d067 | ||||
| 	golang.org/x/lint => golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 | ||||
| 	golang.org/x/mobile => golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6 | ||||
| 	golang.org/x/mod => golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e | ||||
| 	golang.org/x/net => golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc | ||||
| 	golang.org/x/oauth2 => golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a | ||||
| 	golang.org/x/sync => golang.org/x/sync v0.0.0-20181108010431-42b317875d0f | ||||
| @@ -425,6 +429,7 @@ replace ( | ||||
| 	google.golang.org/grpc => google.golang.org/grpc v1.13.0 | ||||
| 	gopkg.in/airbrake/gobrake.v2 => gopkg.in/airbrake/gobrake.v2 v2.0.9 | ||||
| 	gopkg.in/check.v1 => gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 | ||||
| 	gopkg.in/errgo.v2 => gopkg.in/errgo.v2 v2.1.0 | ||||
| 	gopkg.in/fsnotify.v1 => gopkg.in/fsnotify.v1 v1.4.7 | ||||
| 	gopkg.in/gcfg.v1 => gopkg.in/gcfg.v1 v1.2.0 | ||||
| 	gopkg.in/gemnasium/logrus-airbrake-hook.v2 => gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 | ||||
| @@ -437,6 +442,7 @@ replace ( | ||||
| 	gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.2.2 | ||||
| 	gotest.tools => gotest.tools v2.2.0+incompatible | ||||
| 	gotest.tools/gotestsum => gotest.tools/gotestsum v0.3.5 | ||||
| 	honnef.co/go/tools => honnef.co/go/tools v0.0.1-2019.2.2 | ||||
| 	k8s.io/api => ./staging/src/k8s.io/api | ||||
| 	k8s.io/apiextensions-apiserver => ./staging/src/k8s.io/apiextensions-apiserver | ||||
| 	k8s.io/apimachinery => ./staging/src/k8s.io/apimachinery | ||||
|   | ||||
							
								
								
									
										6
									
								
								go.sum
									
									
									
									
									
								
							
							
						
						
									
										6
									
								
								go.sum
									
									
									
									
									
								
							| @@ -206,6 +206,7 @@ github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= | ||||
| github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= | ||||
| github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= | ||||
| github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= | ||||
| github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= | ||||
| github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY= | ||||
| github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= | ||||
| github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k= | ||||
| @@ -367,6 +368,7 @@ github.com/quobyte/api v0.1.2/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H | ||||
| github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M= | ||||
| github.com/robfig/cron v1.1.0 h1:jk4/Hud3TTdcrJgUOBgsqrZBarcxl6ADIjSC2iniwLY= | ||||
| github.com/robfig/cron v1.1.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= | ||||
| github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= | ||||
| github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c h1:ht7N4d/B7Ezf58nvMNVF3OlvDlz9pp+WHVcRNS0nink= | ||||
| github.com/rubiojr/go-vhd v0.0.0-20160810183302-0bfd3b39853c/go.mod h1:DM5xW0nvfNNm2uytzsvhI3OnX8uzaRAg8UX/CnDqbto= | ||||
| github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= | ||||
| @@ -435,6 +437,7 @@ golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMx | ||||
| golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 h1:rJm0LuqUjoDhSk2zO9ISMSToQxGz7Os2jRiOL8AWu4c= | ||||
| golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= | ||||
| golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= | ||||
| golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= | ||||
| golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc h1:gkKoSkUmnU6bpS/VhkuO27bzQeSA51uaEfbOW5dNb68= | ||||
| golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a h1:tImsplftrFpALCYumobsd0K86vlAs/eXGFms2txfJfA= | ||||
| @@ -464,6 +467,7 @@ google.golang.org/grpc v1.13.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmE | ||||
| gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= | ||||
| gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= | ||||
| gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
| gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= | ||||
| gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= | ||||
| gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= | ||||
| gopkg.in/gcfg.v1 v1.2.0 h1:0HIbH907iBTAntm+88IJV2qmJALDAh8sPekI9Vc1fm0= | ||||
| @@ -486,6 +490,8 @@ gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= | ||||
| gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= | ||||
| gotest.tools/gotestsum v0.3.5 h1:VePOWRsuWFYpfp/G8mbmOZKxO5T3501SEGQRUdvq7h0= | ||||
| gotest.tools/gotestsum v0.3.5/go.mod h1:Mnf3e5FUzXbkCfynWBGOwLssY7gTQgCHObK9tMpAriY= | ||||
| honnef.co/go/tools v0.0.1-2019.2.2 h1:TEgegKbBqByGUb1Coo1pc2qIdf2xw6v0mYyLSYtyopE= | ||||
| honnef.co/go/tools v0.0.1-2019.2.2/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= | ||||
| k8s.io/gengo v0.0.0-20190813173942-955ffa8fcfc9 h1:/gJp8cw8+k4AxBNGZ5u5eRoCOzH3WVpY02K654HNiyU= | ||||
| k8s.io/gengo v0.0.0-20190813173942-955ffa8fcfc9/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= | ||||
| k8s.io/heapster v1.2.0-beta.1 h1:lUsE/AHOMHpi3MLlBEkaU8Esxm5QhdyCrv1o7ot0s84= | ||||
|   | ||||
| @@ -12,6 +12,7 @@ require ( | ||||
| 	github.com/modern-go/reflect2 v1.0.1 // indirect | ||||
| 	github.com/spf13/pflag v1.0.3 | ||||
| 	golang.org/x/net v0.0.0-20190812203447-cdfb69ac37fc // indirect | ||||
| 	golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac // indirect | ||||
| 	gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 | ||||
| 	gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e // indirect | ||||
| 	k8s.io/gengo v0.0.0-20190813173942-955ffa8fcfc9 | ||||
|   | ||||
							
								
								
									
										27
									
								
								vendor/BUILD
									
									
									
									
										vendored
									
									
								
							
							
						
						
									
										27
									
								
								vendor/BUILD
									
									
									
									
										vendored
									
									
								
							| @@ -21,6 +21,7 @@ filegroup( | ||||
|         "//vendor/github.com/Azure/go-autorest/autorest:all-srcs", | ||||
|         "//vendor/github.com/Azure/go-autorest/logger:all-srcs", | ||||
|         "//vendor/github.com/Azure/go-autorest/tracing:all-srcs", | ||||
|         "//vendor/github.com/BurntSushi/toml:all-srcs", | ||||
|         "//vendor/github.com/GoogleCloudPlatform/k8s-cloud-provider/pkg/cloud:all-srcs", | ||||
|         "//vendor/github.com/JeffAshton/win_pdh:all-srcs", | ||||
|         "//vendor/github.com/MakeNowJust/heredoc:all-srcs", | ||||
| @@ -399,12 +400,17 @@ filegroup( | ||||
|         "//vendor/golang.org/x/time/rate:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/benchmark/parse:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/container/intsets:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/analysis:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/ast/astutil:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/ast/inspector:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/buildutil:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/gcexportdata:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/internal/cgo:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/internal/gcimporter:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/internal/packagesdriver:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/packages:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/types/objectpath:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/types/typeutil:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/vcs:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/imports:all-srcs", | ||||
|         "//vendor/golang.org/x/tools/internal/fastwalk:all-srcs", | ||||
| @@ -444,6 +450,27 @@ filegroup( | ||||
|         "//vendor/gopkg.in/warnings.v0:all-srcs", | ||||
|         "//vendor/gopkg.in/yaml.v2:all-srcs", | ||||
|         "//vendor/gotest.tools:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/arg:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/cmd/staticcheck:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/config:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/deprecated:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/facts:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/functions:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/go/types/typeutil:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/internal/cache:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/internal/passes/buildssa:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/internal/renameio:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/internal/sharedcheck:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/lint:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/loader:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/printf:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/simple:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/ssa:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/ssautil:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/staticcheck:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/stylecheck:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/unused:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/version:all-srcs", | ||||
|         "//vendor/k8s.io/gengo/args:all-srcs", | ||||
|         "//vendor/k8s.io/gengo/examples/deepcopy-gen/generators:all-srcs", | ||||
|         "//vendor/k8s.io/gengo/examples/defaulter-gen/generators:all-srcs", | ||||
|   | ||||
							
								
								
									
										5
									
								
								vendor/github.com/BurntSushi/toml/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								vendor/github.com/BurntSushi/toml/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,5 @@ | ||||
| TAGS | ||||
| tags | ||||
| .*.swp | ||||
| tomlcheck/tomlcheck | ||||
| toml.test | ||||
							
								
								
									
										15
									
								
								vendor/github.com/BurntSushi/toml/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								vendor/github.com/BurntSushi/toml/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | ||||
| language: go | ||||
| go: | ||||
|   - 1.1 | ||||
|   - 1.2 | ||||
|   - 1.3 | ||||
|   - 1.4 | ||||
|   - 1.5 | ||||
|   - 1.6 | ||||
|   - tip | ||||
| install: | ||||
|   - go install ./... | ||||
|   - go get github.com/BurntSushi/toml-test | ||||
| script: | ||||
|   - export PATH="$PATH:$HOME/gopath/bin" | ||||
|   - make test | ||||
							
								
								
									
										34
									
								
								vendor/github.com/BurntSushi/toml/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								vendor/github.com/BurntSushi/toml/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,34 @@ | ||||
| load("@io_bazel_rules_go//go:def.bzl", "go_library") | ||||
|  | ||||
| go_library( | ||||
|     name = "go_default_library", | ||||
|     srcs = [ | ||||
|         "decode.go", | ||||
|         "decode_meta.go", | ||||
|         "doc.go", | ||||
|         "encode.go", | ||||
|         "encoding_types.go", | ||||
|         "encoding_types_1.1.go", | ||||
|         "lex.go", | ||||
|         "parse.go", | ||||
|         "type_check.go", | ||||
|         "type_fields.go", | ||||
|     ], | ||||
|     importmap = "k8s.io/kubernetes/vendor/github.com/BurntSushi/toml", | ||||
|     importpath = "github.com/BurntSushi/toml", | ||||
|     visibility = ["//visibility:public"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "package-srcs", | ||||
|     srcs = glob(["**"]), | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:private"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "all-srcs", | ||||
|     srcs = [":package-srcs"], | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:public"], | ||||
| ) | ||||
							
								
								
									
										3
									
								
								vendor/github.com/BurntSushi/toml/COMPATIBLE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										3
									
								
								vendor/github.com/BurntSushi/toml/COMPATIBLE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,3 @@ | ||||
| Compatible with TOML version | ||||
| [v0.4.0](https://github.com/toml-lang/toml/blob/v0.4.0/versions/en/toml-v0.4.0.md) | ||||
|  | ||||
							
								
								
									
										21
									
								
								vendor/github.com/BurntSushi/toml/COPYING
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										21
									
								
								vendor/github.com/BurntSushi/toml/COPYING
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,21 @@ | ||||
| The MIT License (MIT) | ||||
|  | ||||
| Copyright (c) 2013 TOML authors | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
| of this software and associated documentation files (the "Software"), to deal | ||||
| in the Software without restriction, including without limitation the rights | ||||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
| copies of the Software, and to permit persons to whom the Software is | ||||
| furnished to do so, subject to the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be included in | ||||
| all copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||||
| THE SOFTWARE. | ||||
							
								
								
									
										19
									
								
								vendor/github.com/BurntSushi/toml/Makefile
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								vendor/github.com/BurntSushi/toml/Makefile
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,19 @@ | ||||
| install: | ||||
| 	go install ./... | ||||
|  | ||||
| test: install | ||||
| 	go test -v | ||||
| 	toml-test toml-test-decoder | ||||
| 	toml-test -encoder toml-test-encoder | ||||
|  | ||||
| fmt: | ||||
| 	gofmt -w *.go */*.go | ||||
| 	colcheck *.go */*.go | ||||
|  | ||||
| tags: | ||||
| 	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS | ||||
|  | ||||
| push: | ||||
| 	git push origin master | ||||
| 	git push github master | ||||
|  | ||||
							
								
								
									
										218
									
								
								vendor/github.com/BurntSushi/toml/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										218
									
								
								vendor/github.com/BurntSushi/toml/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,218 @@ | ||||
| ## TOML parser and encoder for Go with reflection | ||||
|  | ||||
| TOML stands for Tom's Obvious, Minimal Language. This Go package provides a | ||||
| reflection interface similar to Go's standard library `json` and `xml` | ||||
| packages. This package also supports the `encoding.TextUnmarshaler` and | ||||
| `encoding.TextMarshaler` interfaces so that you can define custom data | ||||
| representations. (There is an example of this below.) | ||||
|  | ||||
| Spec: https://github.com/toml-lang/toml | ||||
|  | ||||
| Compatible with TOML version | ||||
| [v0.4.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.4.0.md) | ||||
|  | ||||
| Documentation: https://godoc.org/github.com/BurntSushi/toml | ||||
|  | ||||
| Installation: | ||||
|  | ||||
| ```bash | ||||
| go get github.com/BurntSushi/toml | ||||
| ``` | ||||
|  | ||||
| Try the toml validator: | ||||
|  | ||||
| ```bash | ||||
| go get github.com/BurntSushi/toml/cmd/tomlv | ||||
| tomlv some-toml-file.toml | ||||
| ``` | ||||
|  | ||||
| [](https://travis-ci.org/BurntSushi/toml) [](https://godoc.org/github.com/BurntSushi/toml) | ||||
|  | ||||
| ### Testing | ||||
|  | ||||
| This package passes all tests in | ||||
| [toml-test](https://github.com/BurntSushi/toml-test) for both the decoder | ||||
| and the encoder. | ||||
|  | ||||
| ### Examples | ||||
|  | ||||
| This package works similarly to how the Go standard library handles `XML` | ||||
| and `JSON`. Namely, data is loaded into Go values via reflection. | ||||
|  | ||||
| For the simplest example, consider some TOML file as just a list of keys | ||||
| and values: | ||||
|  | ||||
| ```toml | ||||
| Age = 25 | ||||
| Cats = [ "Cauchy", "Plato" ] | ||||
| Pi = 3.14 | ||||
| Perfection = [ 6, 28, 496, 8128 ] | ||||
| DOB = 1987-07-05T05:45:00Z | ||||
| ``` | ||||
|  | ||||
| Which could be defined in Go as: | ||||
|  | ||||
| ```go | ||||
| type Config struct { | ||||
|   Age int | ||||
|   Cats []string | ||||
|   Pi float64 | ||||
|   Perfection []int | ||||
|   DOB time.Time // requires `import time` | ||||
| } | ||||
| ``` | ||||
|  | ||||
| And then decoded with: | ||||
|  | ||||
| ```go | ||||
| var conf Config | ||||
| if _, err := toml.Decode(tomlData, &conf); err != nil { | ||||
|   // handle error | ||||
| } | ||||
| ``` | ||||
|  | ||||
| You can also use struct tags if your struct field name doesn't map to a TOML | ||||
| key value directly: | ||||
|  | ||||
| ```toml | ||||
| some_key_NAME = "wat" | ||||
| ``` | ||||
|  | ||||
| ```go | ||||
| type TOML struct { | ||||
|   ObscureKey string `toml:"some_key_NAME"` | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### Using the `encoding.TextUnmarshaler` interface | ||||
|  | ||||
| Here's an example that automatically parses duration strings into | ||||
| `time.Duration` values: | ||||
|  | ||||
| ```toml | ||||
| [[song]] | ||||
| name = "Thunder Road" | ||||
| duration = "4m49s" | ||||
|  | ||||
| [[song]] | ||||
| name = "Stairway to Heaven" | ||||
| duration = "8m03s" | ||||
| ``` | ||||
|  | ||||
| Which can be decoded with: | ||||
|  | ||||
| ```go | ||||
| type song struct { | ||||
|   Name     string | ||||
|   Duration duration | ||||
| } | ||||
| type songs struct { | ||||
|   Song []song | ||||
| } | ||||
| var favorites songs | ||||
| if _, err := toml.Decode(blob, &favorites); err != nil { | ||||
|   log.Fatal(err) | ||||
| } | ||||
|  | ||||
| for _, s := range favorites.Song { | ||||
|   fmt.Printf("%s (%s)\n", s.Name, s.Duration) | ||||
| } | ||||
| ``` | ||||
|  | ||||
| And you'll also need a `duration` type that satisfies the | ||||
| `encoding.TextUnmarshaler` interface: | ||||
|  | ||||
| ```go | ||||
| type duration struct { | ||||
| 	time.Duration | ||||
| } | ||||
|  | ||||
| func (d *duration) UnmarshalText(text []byte) error { | ||||
| 	var err error | ||||
| 	d.Duration, err = time.ParseDuration(string(text)) | ||||
| 	return err | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### More complex usage | ||||
|  | ||||
| Here's an example of how to load the example from the official spec page: | ||||
|  | ||||
| ```toml | ||||
| # This is a TOML document. Boom. | ||||
|  | ||||
| title = "TOML Example" | ||||
|  | ||||
| [owner] | ||||
| name = "Tom Preston-Werner" | ||||
| organization = "GitHub" | ||||
| bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." | ||||
| dob = 1979-05-27T07:32:00Z # First class dates? Why not? | ||||
|  | ||||
| [database] | ||||
| server = "192.168.1.1" | ||||
| ports = [ 8001, 8001, 8002 ] | ||||
| connection_max = 5000 | ||||
| enabled = true | ||||
|  | ||||
| [servers] | ||||
|  | ||||
|   # You can indent as you please. Tabs or spaces. TOML don't care. | ||||
|   [servers.alpha] | ||||
|   ip = "10.0.0.1" | ||||
|   dc = "eqdc10" | ||||
|  | ||||
|   [servers.beta] | ||||
|   ip = "10.0.0.2" | ||||
|   dc = "eqdc10" | ||||
|  | ||||
| [clients] | ||||
| data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it | ||||
|  | ||||
| # Line breaks are OK when inside arrays | ||||
| hosts = [ | ||||
|   "alpha", | ||||
|   "omega" | ||||
| ] | ||||
| ``` | ||||
|  | ||||
| And the corresponding Go types are: | ||||
|  | ||||
| ```go | ||||
| type tomlConfig struct { | ||||
| 	Title string | ||||
| 	Owner ownerInfo | ||||
| 	DB database `toml:"database"` | ||||
| 	Servers map[string]server | ||||
| 	Clients clients | ||||
| } | ||||
|  | ||||
| type ownerInfo struct { | ||||
| 	Name string | ||||
| 	Org string `toml:"organization"` | ||||
| 	Bio string | ||||
| 	DOB time.Time | ||||
| } | ||||
|  | ||||
| type database struct { | ||||
| 	Server string | ||||
| 	Ports []int | ||||
| 	ConnMax int `toml:"connection_max"` | ||||
| 	Enabled bool | ||||
| } | ||||
|  | ||||
| type server struct { | ||||
| 	IP string | ||||
| 	DC string | ||||
| } | ||||
|  | ||||
| type clients struct { | ||||
| 	Data [][]interface{} | ||||
| 	Hosts []string | ||||
| } | ||||
| ``` | ||||
|  | ||||
| Note that a case insensitive match will be tried if an exact match can't be | ||||
| found. | ||||
|  | ||||
| A working example of the above can be found in `_examples/example.{go,toml}`. | ||||
							
								
								
									
										509
									
								
								vendor/github.com/BurntSushi/toml/decode.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										509
									
								
								vendor/github.com/BurntSushi/toml/decode.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,509 @@ | ||||
| package toml | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"math" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
// e builds an error whose message carries the "toml: " package prefix so
// callers can tell at a glance which layer produced it.
func e(format string, args ...interface{}) error {
	return fmt.Errorf("toml: %s", fmt.Sprintf(format, args...))
}
|  | ||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
// TOML description of themselves. The argument is the already-parsed TOML
// value (see unify, which short-circuits to this interface when the target
// is addressable and implements it).
type Unmarshaler interface {
	UnmarshalTOML(interface{}) error
}
|  | ||||
| // Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. | ||||
| func Unmarshal(p []byte, v interface{}) error { | ||||
| 	_, err := Decode(string(p), v) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
// When using the various `Decode*` functions, the type `Primitive` may
// be given to any value, and its decoding will be delayed.
//
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
//
// The underlying representation of a `Primitive` value is subject to change.
// Do not rely on it.
//
// N.B. Primitive values are still parsed, so using them will only avoid
// the overhead of reflection. They can be useful when you don't know the
// exact type of TOML data until run time.
type Primitive struct {
	undecoded interface{} // the raw parsed value, captured as-is by unify
	context   Key         // key path at capture time, used for MetaData bookkeeping
}
|  | ||||
// PrimitiveDecode decodes a previously captured Primitive value into `v`
// using a fresh, throwaway MetaData (so no decode bookkeeping is shared).
//
// Deprecated: Use MetaData.PrimitiveDecode instead.
func PrimitiveDecode(primValue Primitive, v interface{}) error {
	md := MetaData{decoded: make(map[string]bool)}
	return md.unify(primValue.undecoded, rvalue(v))
}
|  | ||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
// decodes a TOML value that has already been parsed. Valid primitive values
// can *only* be obtained from values filled by the decoder functions,
// including this method. (i.e., `v` may contain more `Primitive`
// values.)
//
// Meta data for primitive values is included in the meta data returned by
// the `Decode*` functions with one exception: keys returned by the Undecoded
// method will only reflect keys that were decoded. Namely, any keys hidden
// behind a Primitive will be considered undecoded. Executing this method will
// update the undecoded keys in the meta data. (See the example.)
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
	// Restore the key path that was active when the Primitive was captured,
	// so md.decoded entries recorded during unify use the right prefix.
	md.context = primValue.context
	defer func() { md.context = nil }()
	return md.unify(primValue.undecoded, rvalue(v))
}
|  | ||||
// Decode will decode the contents of `data` in TOML format into a pointer
// `v`.
//
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
// used interchangeably.)
//
// TOML arrays of tables correspond to either a slice of structs or a slice
// of maps.
//
// TOML datetimes correspond to Go `time.Time` values.
//
// All other TOML types (float, string, int, bool and array) correspond
// to the obvious Go types.
//
// An exception to the above rules is if a type implements the
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
// (floats, strings, integers, booleans and datetimes) will be converted to
// a byte string and given to the value's UnmarshalText method. See the
// Unmarshaler example for a demonstration with time duration strings.
//
// Key mapping
//
// TOML keys can map to either keys in a Go map or field names in a Go
// struct. The special `toml` struct tag may be used to map TOML keys to
// struct fields that don't match the key name exactly. (See the example.)
// A case insensitive match to struct names will be tried if an exact match
// can't be found.
//
// The mapping between TOML values and Go values is loose. That is, there
// may exist TOML values that cannot be placed into your representation, and
// there may be parts of your representation that do not correspond to
// TOML values. This loose mapping can be made stricter by using the IsDefined
// and/or Undecoded methods on the MetaData returned.
//
// This decoder will not handle cyclic types. If a cyclic type is passed,
// `Decode` will not terminate.
func Decode(data string, v interface{}) (MetaData, error) {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
	}
	if rv.IsNil() {
		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
	}
	p, err := parse(data)
	if err != nil {
		return MetaData{}, err
	}
	// Positional MetaData literal; field order is mapping, types, keys,
	// decoded, context (see the MetaData struct definition).
	md := MetaData{
		p.mapping, p.types, p.ordered,
		make(map[string]bool, len(p.ordered)), nil,
	}
	return md, md.unify(p.mapping, indirect(rv))
}
|  | ||||
| // DecodeFile is just like Decode, except it will automatically read the | ||||
| // contents of the file at `fpath` and decode it for you. | ||||
| func DecodeFile(fpath string, v interface{}) (MetaData, error) { | ||||
| 	bs, err := ioutil.ReadFile(fpath) | ||||
| 	if err != nil { | ||||
| 		return MetaData{}, err | ||||
| 	} | ||||
| 	return Decode(string(bs), v) | ||||
| } | ||||
|  | ||||
| // DecodeReader is just like Decode, except it will consume all bytes | ||||
| // from the reader and decode it for you. | ||||
| func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { | ||||
| 	bs, err := ioutil.ReadAll(r) | ||||
| 	if err != nil { | ||||
| 		return MetaData{}, err | ||||
| 	} | ||||
| 	return Decode(string(bs), v) | ||||
| } | ||||
|  | ||||
// unify performs a sort of type unification based on the structure of `rv`,
// which is the client representation.
//
// Any type mismatch produces an error. Finding a type that we don't know
// how to handle produces an unsupported type error.
//
// NOTE: the special cases at the top are order-sensitive. Primitive capture
// must win over everything, Unmarshaler over TextUnmarshaler, and time.Time
// over the generic struct path — reordering them changes behavior.
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {

	// Special case. Look for a `Primitive` value.
	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
		// Save the undecoded data and the key context into the primitive
		// value.
		context := make(Key, len(md.context))
		copy(context, md.context)
		rv.Set(reflect.ValueOf(Primitive{
			undecoded: data,
			context:   context,
		}))
		return nil
	}

	// Special case. Unmarshaler Interface support.
	if rv.CanAddr() {
		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
			return v.UnmarshalTOML(data)
		}
	}

	// Special case. Handle time.Time values specifically.
	// TODO: Remove this code when we decide to drop support for Go 1.1.
	// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
	// interfaces.
	if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
		return md.unifyDatetime(data, rv)
	}

	// Special case. Look for a value satisfying the TextUnmarshaler interface.
	if v, ok := rv.Interface().(TextUnmarshaler); ok {
		return md.unifyText(data, v)
	}
	// BUG(burntsushi)
	// The behavior here is incorrect whenever a Go type satisfies the
	// encoding.TextUnmarshaler interface but also corresponds to a TOML
	// hash or array. In particular, the unmarshaler should only be applied
	// to primitive TOML values. But at this point, it will be applied to
	// all kinds of values and produce an incorrect error whenever those values
	// are hashes or arrays (including arrays of tables).

	k := rv.Kind()

	// laziness: all integer kinds (signed and unsigned) are contiguous in
	// reflect's Kind enumeration, so one range check covers them all.
	if k >= reflect.Int && k <= reflect.Uint64 {
		return md.unifyInt(data, rv)
	}
	switch k {
	case reflect.Ptr:
		// Allocate a fresh target, unify into it, then install the pointer.
		elem := reflect.New(rv.Type().Elem())
		err := md.unify(data, reflect.Indirect(elem))
		if err != nil {
			return err
		}
		rv.Set(elem)
		return nil
	case reflect.Struct:
		return md.unifyStruct(data, rv)
	case reflect.Map:
		return md.unifyMap(data, rv)
	case reflect.Array:
		return md.unifyArray(data, rv)
	case reflect.Slice:
		return md.unifySlice(data, rv)
	case reflect.String:
		return md.unifyString(data, rv)
	case reflect.Bool:
		return md.unifyBool(data, rv)
	case reflect.Interface:
		// we only support empty interfaces.
		if rv.NumMethod() > 0 {
			return e("unsupported type %s", rv.Type())
		}
		return md.unifyAnything(data, rv)
	case reflect.Float32:
		fallthrough
	case reflect.Float64:
		return md.unifyFloat64(data, rv)
	}
	return e("unsupported type %s", rv.Kind())
}
|  | ||||
| func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { | ||||
| 	tmap, ok := mapping.(map[string]interface{}) | ||||
| 	if !ok { | ||||
| 		if mapping == nil { | ||||
| 			return nil | ||||
| 		} | ||||
| 		return e("type mismatch for %s: expected table but found %T", | ||||
| 			rv.Type().String(), mapping) | ||||
| 	} | ||||
|  | ||||
| 	for key, datum := range tmap { | ||||
| 		var f *field | ||||
| 		fields := cachedTypeFields(rv.Type()) | ||||
| 		for i := range fields { | ||||
| 			ff := &fields[i] | ||||
| 			if ff.name == key { | ||||
| 				f = ff | ||||
| 				break | ||||
| 			} | ||||
| 			if f == nil && strings.EqualFold(ff.name, key) { | ||||
| 				f = ff | ||||
| 			} | ||||
| 		} | ||||
| 		if f != nil { | ||||
| 			subv := rv | ||||
| 			for _, i := range f.index { | ||||
| 				subv = indirect(subv.Field(i)) | ||||
| 			} | ||||
| 			if isUnifiable(subv) { | ||||
| 				md.decoded[md.context.add(key).String()] = true | ||||
| 				md.context = append(md.context, key) | ||||
| 				if err := md.unify(datum, subv); err != nil { | ||||
| 					return err | ||||
| 				} | ||||
| 				md.context = md.context[0 : len(md.context)-1] | ||||
| 			} else if f.name != "" { | ||||
| 				// Bad user! No soup for you! | ||||
| 				return e("cannot write unexported field %s.%s", | ||||
| 					rv.Type().String(), f.name) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { | ||||
| 	tmap, ok := mapping.(map[string]interface{}) | ||||
| 	if !ok { | ||||
| 		if tmap == nil { | ||||
| 			return nil | ||||
| 		} | ||||
| 		return badtype("map", mapping) | ||||
| 	} | ||||
| 	if rv.IsNil() { | ||||
| 		rv.Set(reflect.MakeMap(rv.Type())) | ||||
| 	} | ||||
| 	for k, v := range tmap { | ||||
| 		md.decoded[md.context.add(k).String()] = true | ||||
| 		md.context = append(md.context, k) | ||||
|  | ||||
| 		rvkey := indirect(reflect.New(rv.Type().Key())) | ||||
| 		rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) | ||||
| 		if err := md.unify(v, rvval); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		md.context = md.context[0 : len(md.context)-1] | ||||
|  | ||||
| 		rvkey.SetString(k) | ||||
| 		rv.SetMapIndex(rvkey, rvval) | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { | ||||
| 	datav := reflect.ValueOf(data) | ||||
| 	if datav.Kind() != reflect.Slice { | ||||
| 		if !datav.IsValid() { | ||||
| 			return nil | ||||
| 		} | ||||
| 		return badtype("slice", data) | ||||
| 	} | ||||
| 	sliceLen := datav.Len() | ||||
| 	if sliceLen != rv.Len() { | ||||
| 		return e("expected array length %d; got TOML array of length %d", | ||||
| 			rv.Len(), sliceLen) | ||||
| 	} | ||||
| 	return md.unifySliceArray(datav, rv) | ||||
| } | ||||
|  | ||||
| func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { | ||||
| 	datav := reflect.ValueOf(data) | ||||
| 	if datav.Kind() != reflect.Slice { | ||||
| 		if !datav.IsValid() { | ||||
| 			return nil | ||||
| 		} | ||||
| 		return badtype("slice", data) | ||||
| 	} | ||||
| 	n := datav.Len() | ||||
| 	if rv.IsNil() || rv.Cap() < n { | ||||
| 		rv.Set(reflect.MakeSlice(rv.Type(), n, n)) | ||||
| 	} | ||||
| 	rv.SetLen(n) | ||||
| 	return md.unifySliceArray(datav, rv) | ||||
| } | ||||
|  | ||||
| func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { | ||||
| 	sliceLen := data.Len() | ||||
| 	for i := 0; i < sliceLen; i++ { | ||||
| 		v := data.Index(i).Interface() | ||||
| 		sliceval := indirect(rv.Index(i)) | ||||
| 		if err := md.unify(v, sliceval); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { | ||||
| 	if _, ok := data.(time.Time); ok { | ||||
| 		rv.Set(reflect.ValueOf(data)) | ||||
| 		return nil | ||||
| 	} | ||||
| 	return badtype("time.Time", data) | ||||
| } | ||||
|  | ||||
| func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { | ||||
| 	if s, ok := data.(string); ok { | ||||
| 		rv.SetString(s) | ||||
| 		return nil | ||||
| 	} | ||||
| 	return badtype("string", data) | ||||
| } | ||||
|  | ||||
| func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { | ||||
| 	if num, ok := data.(float64); ok { | ||||
| 		switch rv.Kind() { | ||||
| 		case reflect.Float32: | ||||
| 			fallthrough | ||||
| 		case reflect.Float64: | ||||
| 			rv.SetFloat(num) | ||||
| 		default: | ||||
| 			panic("bug") | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	return badtype("float", data) | ||||
| } | ||||
|  | ||||
| func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { | ||||
| 	if num, ok := data.(int64); ok { | ||||
| 		if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { | ||||
| 			switch rv.Kind() { | ||||
| 			case reflect.Int, reflect.Int64: | ||||
| 				// No bounds checking necessary. | ||||
| 			case reflect.Int8: | ||||
| 				if num < math.MinInt8 || num > math.MaxInt8 { | ||||
| 					return e("value %d is out of range for int8", num) | ||||
| 				} | ||||
| 			case reflect.Int16: | ||||
| 				if num < math.MinInt16 || num > math.MaxInt16 { | ||||
| 					return e("value %d is out of range for int16", num) | ||||
| 				} | ||||
| 			case reflect.Int32: | ||||
| 				if num < math.MinInt32 || num > math.MaxInt32 { | ||||
| 					return e("value %d is out of range for int32", num) | ||||
| 				} | ||||
| 			} | ||||
| 			rv.SetInt(num) | ||||
| 		} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { | ||||
| 			unum := uint64(num) | ||||
| 			switch rv.Kind() { | ||||
| 			case reflect.Uint, reflect.Uint64: | ||||
| 				// No bounds checking necessary. | ||||
| 			case reflect.Uint8: | ||||
| 				if num < 0 || unum > math.MaxUint8 { | ||||
| 					return e("value %d is out of range for uint8", num) | ||||
| 				} | ||||
| 			case reflect.Uint16: | ||||
| 				if num < 0 || unum > math.MaxUint16 { | ||||
| 					return e("value %d is out of range for uint16", num) | ||||
| 				} | ||||
| 			case reflect.Uint32: | ||||
| 				if num < 0 || unum > math.MaxUint32 { | ||||
| 					return e("value %d is out of range for uint32", num) | ||||
| 				} | ||||
| 			} | ||||
| 			rv.SetUint(unum) | ||||
| 		} else { | ||||
| 			panic("unreachable") | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	return badtype("integer", data) | ||||
| } | ||||
|  | ||||
| func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { | ||||
| 	if b, ok := data.(bool); ok { | ||||
| 		rv.SetBool(b) | ||||
| 		return nil | ||||
| 	} | ||||
| 	return badtype("boolean", data) | ||||
| } | ||||
|  | ||||
// unifyAnything stores the parsed value as-is; used when the target is an
// empty interface, so no conversion or validation is required.
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
	rv.Set(reflect.ValueOf(data))
	return nil
}
|  | ||||
| func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { | ||||
| 	var s string | ||||
| 	switch sdata := data.(type) { | ||||
| 	case TextMarshaler: | ||||
| 		text, err := sdata.MarshalText() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		s = string(text) | ||||
| 	case fmt.Stringer: | ||||
| 		s = sdata.String() | ||||
| 	case string: | ||||
| 		s = sdata | ||||
| 	case bool: | ||||
| 		s = fmt.Sprintf("%v", sdata) | ||||
| 	case int64: | ||||
| 		s = fmt.Sprintf("%d", sdata) | ||||
| 	case float64: | ||||
| 		s = fmt.Sprintf("%f", sdata) | ||||
| 	default: | ||||
| 		return badtype("primitive (string-like)", data) | ||||
| 	} | ||||
| 	if err := v.UnmarshalText([]byte(s)); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved
// (allocating through nil pointers); see indirect for the exact rules.
func rvalue(v interface{}) reflect.Value {
	return indirect(reflect.ValueOf(v))
}
|  | ||||
| // indirect returns the value pointed to by a pointer. | ||||
| // Pointers are followed until the value is not a pointer. | ||||
| // New values are allocated for each nil pointer. | ||||
| // | ||||
| // An exception to this rule is if the value satisfies an interface of | ||||
| // interest to us (like encoding.TextUnmarshaler). | ||||
| func indirect(v reflect.Value) reflect.Value { | ||||
| 	if v.Kind() != reflect.Ptr { | ||||
| 		if v.CanSet() { | ||||
| 			pv := v.Addr() | ||||
| 			if _, ok := pv.Interface().(TextUnmarshaler); ok { | ||||
| 				return pv | ||||
| 			} | ||||
| 		} | ||||
| 		return v | ||||
| 	} | ||||
| 	if v.IsNil() { | ||||
| 		v.Set(reflect.New(v.Type().Elem())) | ||||
| 	} | ||||
| 	return indirect(reflect.Indirect(v)) | ||||
| } | ||||
|  | ||||
| func isUnifiable(rv reflect.Value) bool { | ||||
| 	if rv.CanSet() { | ||||
| 		return true | ||||
| 	} | ||||
| 	if _, ok := rv.Interface().(TextUnmarshaler); ok { | ||||
| 		return true | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
// badtype returns the standard type-mismatch error: the concrete Go type of
// the parsed TOML value `data` cannot be loaded into the `expected` kind of
// Go value.
func badtype(expected string, data interface{}) error {
	return e("cannot load TOML value of type %T into a Go %s", data, expected)
}
							
								
								
									
										121
									
								
								vendor/github.com/BurntSushi/toml/decode_meta.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										121
									
								
								vendor/github.com/BurntSushi/toml/decode_meta.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,121 @@ | ||||
| package toml | ||||
|  | ||||
| import "strings" | ||||
|  | ||||
// MetaData allows access to meta information about TOML data that may not
// be inferrable via reflection. In particular, whether a key has been defined
// and the TOML type of a key.
type MetaData struct {
	mapping map[string]interface{} // parsed document: nested tables of raw values
	types   map[string]tomlType    // dotted key -> TOML type, as recorded by the parser
	keys    []Key                  // every key, in document order
	decoded map[string]bool        // dotted keys successfully decoded so far
	context Key                    // Used only during decoding: current key path.
}
|  | ||||
| // IsDefined returns true if the key given exists in the TOML data. The key | ||||
| // should be specified hierarchially. e.g., | ||||
| // | ||||
| //	// access the TOML key 'a.b.c' | ||||
| //	IsDefined("a", "b", "c") | ||||
| // | ||||
| // IsDefined will return false if an empty key given. Keys are case sensitive. | ||||
| func (md *MetaData) IsDefined(key ...string) bool { | ||||
| 	if len(key) == 0 { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	var hash map[string]interface{} | ||||
| 	var ok bool | ||||
| 	var hashOrVal interface{} = md.mapping | ||||
| 	for _, k := range key { | ||||
| 		if hash, ok = hashOrVal.(map[string]interface{}); !ok { | ||||
| 			return false | ||||
| 		} | ||||
| 		if hashOrVal, ok = hash[k]; !ok { | ||||
| 			return false | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // Type returns a string representation of the type of the key specified. | ||||
| // | ||||
| // Type will return the empty string if given an empty key or a key that | ||||
| // does not exist. Keys are case sensitive. | ||||
| func (md *MetaData) Type(key ...string) string { | ||||
| 	fullkey := strings.Join(key, ".") | ||||
| 	if typ, ok := md.types[fullkey]; ok { | ||||
| 		return typ.typeString() | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
// to get values of this type. Each element is one dotted-path segment.
type Key []string

// String renders the key in plain dotted form without quoting; see
// maybeQuotedAll for the quoting variant.
func (k Key) String() string {
	return strings.Join(k, ".")
}
|  | ||||
| func (k Key) maybeQuotedAll() string { | ||||
| 	var ss []string | ||||
| 	for i := range k { | ||||
| 		ss = append(ss, k.maybeQuoted(i)) | ||||
| 	} | ||||
| 	return strings.Join(ss, ".") | ||||
| } | ||||
|  | ||||
| func (k Key) maybeQuoted(i int) string { | ||||
| 	quote := false | ||||
| 	for _, c := range k[i] { | ||||
| 		if !isBareKeyChar(c) { | ||||
| 			quote = true | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	if quote { | ||||
| 		return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" | ||||
| 	} | ||||
| 	return k[i] | ||||
| } | ||||
|  | ||||
| func (k Key) add(piece string) Key { | ||||
| 	newKey := make(Key, len(k)+1) | ||||
| 	copy(newKey, k) | ||||
| 	newKey[len(k)] = piece | ||||
| 	return newKey | ||||
| } | ||||
|  | ||||
// Keys returns a slice of every key in the TOML data, including key groups.
// Each key is itself a slice, where the first element is the top of the
// hierarchy and the last is the most specific.
//
// The list will have the same order as the keys appeared in the TOML data.
//
// All keys returned are non-empty.
//
// The returned slice is the internal one; callers should not mutate it.
func (md *MetaData) Keys() []Key {
	return md.keys
}
|  | ||||
| // Undecoded returns all keys that have not been decoded in the order in which | ||||
| // they appear in the original TOML document. | ||||
| // | ||||
| // This includes keys that haven't been decoded because of a Primitive value. | ||||
| // Once the Primitive value is decoded, the keys will be considered decoded. | ||||
| // | ||||
| // Also note that decoding into an empty interface will result in no decoding, | ||||
| // and so no keys will be considered decoded. | ||||
| // | ||||
| // In this sense, the Undecoded keys correspond to keys in the TOML document | ||||
| // that do not have a concrete type in your representation. | ||||
| func (md *MetaData) Undecoded() []Key { | ||||
| 	undecoded := make([]Key, 0, len(md.keys)) | ||||
| 	for _, key := range md.keys { | ||||
| 		if !md.decoded[key.String()] { | ||||
| 			undecoded = append(undecoded, key) | ||||
| 		} | ||||
| 	} | ||||
| 	return undecoded | ||||
| } | ||||
							
								
								
									
										27
									
								
								vendor/github.com/BurntSushi/toml/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/github.com/BurntSushi/toml/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
| /* | ||||
| Package toml provides facilities for decoding and encoding TOML configuration | ||||
| files via reflection. There is also support for delaying decoding with | ||||
| the Primitive type, and querying the set of keys in a TOML document with the | ||||
| MetaData type. | ||||
|  | ||||
| The specification implemented: https://github.com/toml-lang/toml | ||||
|  | ||||
| The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify | ||||
| whether a file is a valid TOML document. It can also be used to print the | ||||
| type of each key in a TOML document. | ||||
|  | ||||
| Testing | ||||
|  | ||||
| There are two important types of tests used for this package. The first is | ||||
| contained inside '*_test.go' files and uses the standard Go unit testing | ||||
| framework. These tests are primarily devoted to holistically testing the | ||||
| decoder and encoder. | ||||
|  | ||||
| The second type of testing is used to verify the implementation's adherence | ||||
| to the TOML specification. These tests have been factored into their own | ||||
| project: https://github.com/BurntSushi/toml-test | ||||
|  | ||||
| The reason the tests are in a separate project is so that they can be used by | ||||
| any implementation of TOML. Namely, it is language agnostic. | ||||
| */ | ||||
| package toml | ||||
							
								
								
									
										568
									
								
								vendor/github.com/BurntSushi/toml/encode.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										568
									
								
								vendor/github.com/BurntSushi/toml/encode.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,568 @@ | ||||
| package toml | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"reflect" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
// tomlEncodeError wraps an error raised as a panic during encoding so that
// safeEncode can distinguish it from genuine bugs and convert it back into
// an ordinary error return.
type tomlEncodeError struct{ error }

// Sentinel errors returned (via encPanic) for Go values that have no valid
// TOML representation.
var (
	errArrayMixedElementTypes = errors.New(
		"toml: cannot encode array with mixed element types")
	errArrayNilElement = errors.New(
		"toml: cannot encode array with nil element")
	errNonString = errors.New(
		"toml: cannot encode a map with non-string key type")
	errAnonNonStruct = errors.New(
		"toml: cannot encode an anonymous field that is not a struct")
	errArrayNoTable = errors.New(
		"toml: TOML array element cannot contain a table")
	errNoKey = errors.New(
		"toml: top-level values must be Go maps or structs")
	errAnything = errors.New("") // used in testing
)

// quotedReplacer escapes the characters that must be backslash-escaped
// inside a basic (double-quoted) TOML string.
var quotedReplacer = strings.NewReplacer(
	"\t", "\\t",
	"\n", "\\n",
	"\r", "\\r",
	"\"", "\\\"",
	"\\", "\\\\",
)
|  | ||||
// Encoder controls the encoding of Go values to a TOML document to some
// io.Writer.
//
// The indentation level can be controlled with the Indent field.
type Encoder struct {
	// A single indentation level. By default it is two spaces.
	Indent string

	// hasWritten is whether we have written any output to w yet.
	hasWritten bool
	w          *bufio.Writer // buffered destination; flushed at the end of Encode
}
|  | ||||
| // NewEncoder returns a TOML encoder that encodes Go values to the io.Writer | ||||
| // given. By default, a single indentation level is 2 spaces. | ||||
| func NewEncoder(w io.Writer) *Encoder { | ||||
| 	return &Encoder{ | ||||
| 		w:      bufio.NewWriter(w), | ||||
| 		Indent: "  ", | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Encode writes a TOML representation of the Go value to the underlying | ||||
| // io.Writer. If the value given cannot be encoded to a valid TOML document, | ||||
| // then an error is returned. | ||||
| // | ||||
| // The mapping between Go values and TOML values should be precisely the same | ||||
| // as for the Decode* functions. Similarly, the TextMarshaler interface is | ||||
| // supported by encoding the resulting bytes as strings. (If you want to write | ||||
| // arbitrary binary data then you will need to use something like base64 since | ||||
| // TOML does not have any binary types.) | ||||
| // | ||||
| // When encoding TOML hashes (i.e., Go maps or structs), keys without any | ||||
| // sub-hashes are encoded first. | ||||
| // | ||||
| // If a Go map is encoded, then its keys are sorted alphabetically for | ||||
| // deterministic output. More control over this behavior may be provided if | ||||
| // there is demand for it. | ||||
| // | ||||
| // Encoding Go values without a corresponding TOML representation---like map | ||||
| // types with non-string keys---will cause an error to be returned. Similarly | ||||
| // for mixed arrays/slices, arrays/slices with nil elements, embedded | ||||
| // non-struct types and nested slices containing maps or structs. | ||||
| // (e.g., [][]map[string]string is not allowed but []map[string]string is OK | ||||
| // and so is []map[string][]string.) | ||||
| func (enc *Encoder) Encode(v interface{}) error { | ||||
| 	rv := eindirect(reflect.ValueOf(v)) | ||||
| 	if err := enc.safeEncode(Key([]string{}), rv); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return enc.w.Flush() | ||||
| } | ||||
|  | ||||
// safeEncode runs enc.encode and converts a tomlEncodeError panic (raised
// via encPanic anywhere in the encode tree) back into an ordinary error
// return. Any other panic is considered a bug and is re-raised.
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
	defer func() {
		if r := recover(); r != nil {
			if terr, ok := r.(tomlEncodeError); ok {
				err = terr.error
				return
			}
			panic(r)
		}
	}()
	enc.encode(key, rv)
	return nil
}
|  | ||||
// encode dispatches on the kind of rv: primitives and plain arrays become
// `key = value` lines, arrays of tables and maps/structs become tables, and
// pointers/interfaces are dereferenced (nil ones are silently skipped).
func (enc *Encoder) encode(key Key, rv reflect.Value) {
	// Special case. Time needs to be in ISO8601 format.
	// Special case. If we can marshal the type to text, then we used that.
	// Basically, this prevents the encoder for handling these types as
	// generic structs (or whatever the underlying type of a TextMarshaler is).
	switch rv.Interface().(type) {
	case time.Time, TextMarshaler:
		enc.keyEqElement(key, rv)
		return
	}

	k := rv.Kind()
	switch k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64,
		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
		enc.keyEqElement(key, rv)
	case reflect.Array, reflect.Slice:
		// A slice of tables is rendered as [[key]] blocks; anything else is
		// an inline array value.
		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
			enc.eArrayOfTables(key, rv)
		} else {
			enc.keyEqElement(key, rv)
		}
	case reflect.Interface:
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Map:
		if rv.IsNil() {
			return
		}
		enc.eTable(key, rv)
	case reflect.Ptr:
		if rv.IsNil() {
			return
		}
		enc.encode(key, rv.Elem())
	case reflect.Struct:
		enc.eTable(key, rv)
	default:
		panic(e("unsupported type for key '%s': %s", key, k))
	}
}
|  | ||||
// eElement encodes any value that can be an array element (primitives and
// arrays). Values that cannot be represented panic via e/encPanic.
func (enc *Encoder) eElement(rv reflect.Value) {
	switch v := rv.Interface().(type) {
	case time.Time:
		// Special case time.Time as a primitive. Has to come before
		// TextMarshaler below because time.Time implements
		// encoding.TextMarshaler, but we need to always use UTC.
		// Note the format string carries no fractional seconds, so
		// sub-second precision is dropped.
		enc.wf(v.UTC().Format("2006-01-02T15:04:05Z"))
		return
	case TextMarshaler:
		// Special case. Use text marshaler if it's available for this value.
		if s, err := v.MarshalText(); err != nil {
			encPanic(err)
		} else {
			enc.writeQuoted(string(s))
		}
		return
	}
	switch rv.Kind() {
	case reflect.Bool:
		enc.wf(strconv.FormatBool(rv.Bool()))
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64:
		enc.wf(strconv.FormatInt(rv.Int(), 10))
	case reflect.Uint, reflect.Uint8, reflect.Uint16,
		reflect.Uint32, reflect.Uint64:
		enc.wf(strconv.FormatUint(rv.Uint(), 10))
	case reflect.Float32:
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
	case reflect.Float64:
		enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
	case reflect.Array, reflect.Slice:
		enc.eArrayOrSliceElement(rv)
	case reflect.Interface:
		// Unwrap the interface and encode the concrete value.
		enc.eElement(rv.Elem())
	case reflect.String:
		enc.writeQuoted(rv.String())
	default:
		panic(e("unexpected primitive type: %s", rv.Kind()))
	}
}
|  | ||||
// floatAddDecimal normalizes a formatted float for TOML: the spec
// requires every float to contain a decimal point with at least one
// digit on either side, so integral values like "3" become "3.0".
func floatAddDecimal(fstr string) string {
	if strings.Contains(fstr, ".") {
		return fstr
	}
	return fstr + ".0"
}
|  | ||||
// writeQuoted writes s as a basic (double-quoted) TOML string, escaping
// special characters via the package-level quotedReplacer.
func (enc *Encoder) writeQuoted(s string) {
	enc.wf("\"%s\"", quotedReplacer.Replace(s))
}
|  | ||||
| func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { | ||||
| 	length := rv.Len() | ||||
| 	enc.wf("[") | ||||
| 	for i := 0; i < length; i++ { | ||||
| 		elem := rv.Index(i) | ||||
| 		enc.eElement(elem) | ||||
| 		if i != length-1 { | ||||
| 			enc.wf(", ") | ||||
| 		} | ||||
| 	} | ||||
| 	enc.wf("]") | ||||
| } | ||||
|  | ||||
// eArrayOfTables writes rv as a TOML array of tables: one "[[key]]"
// header per non-nil element, each followed by that element's
// key/value pairs. An empty key is an error (arrays of tables cannot
// appear at the top level without a name).
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	for i := 0; i < rv.Len(); i++ {
		trv := rv.Index(i)
		if isNil(trv) {
			// Nil elements are silently skipped.
			continue
		}
		panicIfInvalidKey(key)
		enc.newline()
		enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
		enc.eMapOrStruct(key, trv)
	}
}
|  | ||||
// eTable writes rv (a map or struct) as a TOML table. A "[key]" header
// is emitted for any non-empty key; the top-level table (empty key) has
// no header at all.
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
	panicIfInvalidKey(key)
	if len(key) == 1 {
		// Output an extra newline between top-level tables.
		// (The newline isn't written if nothing else has been written though.)
		enc.newline()
	}
	if len(key) > 0 {
		enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
		enc.newline()
	}
	enc.eMapOrStruct(key, rv)
}
|  | ||||
| func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { | ||||
| 	switch rv := eindirect(rv); rv.Kind() { | ||||
| 	case reflect.Map: | ||||
| 		enc.eMap(key, rv) | ||||
| 	case reflect.Struct: | ||||
| 		enc.eStruct(key, rv) | ||||
| 	default: | ||||
| 		panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// eMap writes a map as a TOML table. Only string-keyed maps are
// representable; anything else panics with errNonString.
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
	rt := rv.Type()
	if rt.Key().Kind() != reflect.String {
		encPanic(errNonString)
	}

	// Sort keys so that we have deterministic output. And write keys directly
	// underneath this key first, before writing sub-structs or sub-maps,
	// so that plain values are not swallowed into a later sub-table.
	var mapKeysDirect, mapKeysSub []string
	for _, mapKey := range rv.MapKeys() {
		k := mapKey.String()
		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
			mapKeysSub = append(mapKeysSub, k)
		} else {
			mapKeysDirect = append(mapKeysDirect, k)
		}
	}

	var writeMapKeys = func(mapKeys []string) {
		sort.Strings(mapKeys)
		for _, mapKey := range mapKeys {
			mrv := rv.MapIndex(reflect.ValueOf(mapKey))
			if isNil(mrv) {
				// Don't write anything for nil fields.
				continue
			}
			enc.encode(key.add(mapKey), mrv)
		}
	}
	writeMapKeys(mapKeysDirect)
	writeMapKeys(mapKeysSub)
}
|  | ||||
// eStruct writes a struct as a TOML table. Exported fields are
// partitioned into "direct" (primitive) and "sub" (table-valued) sets;
// anonymous embedded structs without a tag name are flattened into the
// parent, mirroring encoding/json. The "toml" tag controls naming,
// skipping ("-"), omitempty, and omitzero.
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
	// Write keys for fields directly under this key first, because if we write
	// a field that creates a new table, then all keys under it will be in that
	// table (not the one we're writing here).
	rt := rv.Type()
	var fieldsDirect, fieldsSub [][]int
	// addFields recursively collects field index paths, flattening
	// untagged anonymous structs so their fields appear at this level.
	var addFields func(rt reflect.Type, rv reflect.Value, start []int)
	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
		for i := 0; i < rt.NumField(); i++ {
			f := rt.Field(i)
			// skip unexported fields
			if f.PkgPath != "" && !f.Anonymous {
				continue
			}
			frv := rv.Field(i)
			if f.Anonymous {
				t := f.Type
				switch t.Kind() {
				case reflect.Struct:
					// Treat anonymous struct fields with
					// tag names as though they are not
					// anonymous, like encoding/json does.
					if getOptions(f.Tag).name == "" {
						addFields(t, frv, f.Index)
						continue
					}
				case reflect.Ptr:
					if t.Elem().Kind() == reflect.Struct &&
						getOptions(f.Tag).name == "" {
						if !frv.IsNil() {
							addFields(t.Elem(), frv.Elem(), f.Index)
						}
						continue
					}
					// Fall through to the normal field encoding logic below
					// for non-struct anonymous fields.
				}
			}

			if typeIsHash(tomlTypeOfGo(frv)) {
				fieldsSub = append(fieldsSub, append(start, f.Index...))
			} else {
				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
			}
		}
	}
	addFields(rt, rv, nil)

	var writeFields = func(fields [][]int) {
		for _, fieldIndex := range fields {
			sft := rt.FieldByIndex(fieldIndex)
			sf := rv.FieldByIndex(fieldIndex)
			if isNil(sf) {
				// Don't write anything for nil fields.
				continue
			}

			opts := getOptions(sft.Tag)
			if opts.skip {
				continue
			}
			keyName := sft.Name
			if opts.name != "" {
				keyName = opts.name
			}
			if opts.omitempty && isEmpty(sf) {
				continue
			}
			if opts.omitzero && isZero(sf) {
				continue
			}

			enc.encode(key.add(keyName), sf)
		}
	}
	writeFields(fieldsDirect)
	writeFields(fieldsSub)
}
|  | ||||
// tomlTypeOfGo returns the TOML type of a Go value. The type may be
// `nil`, which means no concrete TOML type could be found (e.g. for a
// nil pointer/map/slice). Callers use this to tell tables apart from
// primitives and to detect mixed-type arrays (a nil result makes the
// value illegal as an array element).
func tomlTypeOfGo(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() {
		return nil
	}
	switch rv.Kind() {
	case reflect.Bool:
		return tomlBool
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
		reflect.Int64,
		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
		reflect.Uint64:
		return tomlInteger
	case reflect.Float32, reflect.Float64:
		return tomlFloat
	case reflect.Array, reflect.Slice:
		// A slice whose elements are tables is an array of tables.
		if typeEqual(tomlHash, tomlArrayType(rv)) {
			return tomlArrayHash
		}
		return tomlArray
	case reflect.Ptr, reflect.Interface:
		return tomlTypeOfGo(rv.Elem())
	case reflect.String:
		return tomlString
	case reflect.Map:
		return tomlHash
	case reflect.Struct:
		switch rv.Interface().(type) {
		case time.Time:
			return tomlDatetime
		case TextMarshaler:
			// TextMarshaler values render as quoted strings.
			return tomlString
		default:
			return tomlHash
		}
	default:
		panic("unexpected reflect.Kind: " + rv.Kind().String())
	}
}
|  | ||||
// tomlArrayType returns the element type of a TOML array. The type returned
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
// slice). This function may also panic if it finds a type that cannot be
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
// nested arrays of tables).
func tomlArrayType(rv reflect.Value) tomlType {
	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
		return nil
	}
	firstType := tomlTypeOfGo(rv.Index(0))
	if firstType == nil {
		encPanic(errArrayNilElement)
	}

	// Every remaining element must match the first element's type.
	rvlen := rv.Len()
	for i := 1; i < rvlen; i++ {
		elem := rv.Index(i)
		switch elemType := tomlTypeOfGo(elem); {
		case elemType == nil:
			encPanic(errArrayNilElement)
		case !typeEqual(firstType, elemType):
			encPanic(errArrayMixedElementTypes)
		}
	}
	// If we have a nested array, then we must make sure that the nested
	// array contains ONLY primitives.
	// This checks arbitrarily nested arrays.
	if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
		nest := tomlArrayType(eindirect(rv.Index(0)))
		if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
			encPanic(errArrayNoTable)
		}
	}
	return firstType
}
|  | ||||
// tagOptions holds the parsed components of a struct field's "toml" tag.
type tagOptions struct {
	skip      bool // "-": omit the field entirely
	name      string
	omitempty bool
	omitzero  bool
}
|  | ||||
| func getOptions(tag reflect.StructTag) tagOptions { | ||||
| 	t := tag.Get("toml") | ||||
| 	if t == "-" { | ||||
| 		return tagOptions{skip: true} | ||||
| 	} | ||||
| 	var opts tagOptions | ||||
| 	parts := strings.Split(t, ",") | ||||
| 	opts.name = parts[0] | ||||
| 	for _, s := range parts[1:] { | ||||
| 		switch s { | ||||
| 		case "omitempty": | ||||
| 			opts.omitempty = true | ||||
| 		case "omitzero": | ||||
| 			opts.omitzero = true | ||||
| 		} | ||||
| 	} | ||||
| 	return opts | ||||
| } | ||||
|  | ||||
| func isZero(rv reflect.Value) bool { | ||||
| 	switch rv.Kind() { | ||||
| 	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: | ||||
| 		return rv.Int() == 0 | ||||
| 	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: | ||||
| 		return rv.Uint() == 0 | ||||
| 	case reflect.Float32, reflect.Float64: | ||||
| 		return rv.Float() == 0.0 | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func isEmpty(rv reflect.Value) bool { | ||||
| 	switch rv.Kind() { | ||||
| 	case reflect.Array, reflect.Slice, reflect.Map, reflect.String: | ||||
| 		return rv.Len() == 0 | ||||
| 	case reflect.Bool: | ||||
| 		return !rv.Bool() | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
// newline writes a line break, but only once something else has been
// written — this suppresses a leading blank line at the very top of
// the output.
func (enc *Encoder) newline() {
	if enc.hasWritten {
		enc.wf("\n")
	}
}
|  | ||||
// keyEqElement writes a single "key = value" line at the indentation
// level implied by key, quoting the final key segment if necessary.
// An empty key is an error.
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
	if len(key) == 0 {
		encPanic(errNoKey)
	}
	panicIfInvalidKey(key)
	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
	enc.eElement(val)
	enc.newline()
}
|  | ||||
// wf writes formatted output to the underlying writer, converting any
// write error into an encode panic and recording that output has been
// produced (see newline).
func (enc *Encoder) wf(format string, v ...interface{}) {
	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
		encPanic(err)
	}
	enc.hasWritten = true
}
|  | ||||
// indentStr returns the indentation prefix for a key: one copy of
// enc.Indent per nesting level below the top.
func (enc *Encoder) indentStr(key Key) string {
	return strings.Repeat(enc.Indent, len(key)-1)
}
|  | ||||
// encPanic aborts encoding by panicking with a tomlEncodeError, which
// safeEncode recovers and turns back into an ordinary error.
func encPanic(err error) {
	panic(tomlEncodeError{err})
}
|  | ||||
| func eindirect(v reflect.Value) reflect.Value { | ||||
| 	switch v.Kind() { | ||||
| 	case reflect.Ptr, reflect.Interface: | ||||
| 		return eindirect(v.Elem()) | ||||
| 	default: | ||||
| 		return v | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func isNil(rv reflect.Value) bool { | ||||
| 	switch rv.Kind() { | ||||
| 	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: | ||||
| 		return rv.IsNil() | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
|  | ||||
// panicIfInvalidKey aborts encoding (via encPanic) if any segment of
// key is empty — empty key names cannot be represented in TOML.
func panicIfInvalidKey(key Key) {
	for _, k := range key {
		if len(k) == 0 {
			encPanic(e("Key '%s' is not a valid table name. Key names "+
				"cannot be empty.", key.maybeQuotedAll()))
		}
	}
}
|  | ||||
// isValidKeyName reports whether s may be used as a key name: any
// non-empty string is acceptable.
func isValidKeyName(s string) bool {
	return s != ""
}
							
								
								
									
										19
									
								
								vendor/github.com/BurntSushi/toml/encoding_types.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								vendor/github.com/BurntSushi/toml/encoding_types.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,19 @@ | ||||
// +build go1.2

package toml

// In order to support Go 1.1, we define our own TextMarshaler and
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
// standard library interfaces. (This file is compiled only for Go 1.2
// and newer; encoding_types_1.1.go provides the pre-1.2 definitions.)

import (
	"encoding"
)

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler encoding.TextMarshaler

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler encoding.TextUnmarshaler
							
								
								
									
										18
									
								
								vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										18
									
								
								vendor/github.com/BurntSushi/toml/encoding_types_1.1.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,18 @@ | ||||
// +build !go1.2

package toml

// These interfaces were introduced in Go 1.2, so we add them manually when
// compiling for Go 1.1. (This file is compiled only for Go versions
// before 1.2; encoding_types.go provides the aliases for 1.2+.)

// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
// so that Go 1.1 can be supported.
type TextMarshaler interface {
	MarshalText() (text []byte, err error)
}

// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
// here so that Go 1.1 can be supported.
type TextUnmarshaler interface {
	UnmarshalText(text []byte) error
}
							
								
								
									
										953
									
								
								vendor/github.com/BurntSushi/toml/lex.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										953
									
								
								vendor/github.com/BurntSushi/toml/lex.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,953 @@ | ||||
| package toml | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
// itemType identifies the kind of token produced by the lexer.
type itemType int

// Token kinds emitted through the lexer's item channel.
const (
	itemError itemType = iota
	itemNIL            // used in the parser to indicate no type
	itemEOF
	itemText
	itemString
	itemRawString
	itemMultilineString
	itemRawMultilineString
	itemBool
	itemInteger
	itemFloat
	itemDatetime
	itemArray // the start of an array
	itemArrayEnd
	itemTableStart
	itemTableEnd
	itemArrayTableStart
	itemArrayTableEnd
	itemKeyStart
	itemCommentStart
	itemInlineTableStart
	itemInlineTableEnd
)
|  | ||||
// Delimiter runes of the TOML grammar. eof (rune 0) is the sentinel
// returned by lexer.next when the input is exhausted.
const (
	eof              = 0
	comma            = ','
	tableStart       = '['
	tableEnd         = ']'
	arrayTableStart  = '['
	arrayTableEnd    = ']'
	tableSep         = '.'
	keySep           = '='
	arrayStart       = '['
	arrayEnd         = ']'
	commentStart     = '#'
	stringStart      = '"'
	stringEnd        = '"'
	rawStringStart   = '\''
	rawStringEnd     = '\''
	inlineTableStart = '{'
	inlineTableEnd   = '}'
)
|  | ||||
// stateFn is one state of the lexer's state machine: it consumes some
// input and returns the next state (nil terminates lexing).
type stateFn func(lx *lexer) stateFn

// lexer holds the scanning state for one TOML document.
type lexer struct {
	input string
	start int // byte offset where the current item begins
	pos   int // byte offset of the next rune to read
	line  int // 1-based line number at pos
	state stateFn
	items chan item

	// Allow for backing up up to three runes.
	// This is necessary because TOML contains 3-rune tokens (""" and ''').
	prevWidths [3]int
	nprev      int // how many of prevWidths are in use
	// If we emit an eof, we can still back up, but it is not OK to call
	// next again.
	atEOF bool

	// A stack of state functions used to maintain context.
	// The idea is to reuse parts of the state machine in various places.
	// For example, values can appear at the top level or within arbitrarily
	// nested arrays. The last state on the stack is used after a value has
	// been lexed. Similarly for comments.
	stack []stateFn
}

// item is a single lexed token: its kind, raw text, and source line.
type item struct {
	typ  itemType
	val  string
	line int
}
|  | ||||
// nextItem returns the next token, stepping the state machine as many
// times as needed until one is available on the (buffered) channel.
// The lexer runs synchronously on the caller's goroutine.
func (lx *lexer) nextItem() item {
	for {
		select {
		case item := <-lx.items:
			return item
		default:
			lx.state = lx.state(lx)
		}
	}
}
|  | ||||
// lex creates a lexer for input, positioned at the start in the
// top-level state.
func lex(input string) *lexer {
	lx := &lexer{
		input: input,
		state: lexTop,
		line:  1,
		items: make(chan item, 10),
		stack: make([]stateFn, 0, 10),
	}
	return lx
}
|  | ||||
// push saves a state on the context stack, to be resumed later via pop.
func (lx *lexer) push(state stateFn) {
	lx.stack = append(lx.stack, state)
}
|  | ||||
// pop removes and returns the most recently pushed state. Popping an
// empty stack is a lexer bug and is reported as an error item.
func (lx *lexer) pop() stateFn {
	if len(lx.stack) == 0 {
		return lx.errorf("BUG in lexer: no states to pop")
	}
	last := lx.stack[len(lx.stack)-1]
	lx.stack = lx.stack[0 : len(lx.stack)-1]
	return last
}
|  | ||||
// current returns the text of the item being lexed so far.
func (lx *lexer) current() string {
	return lx.input[lx.start:lx.pos]
}
|  | ||||
// emit sends the current text as an item of the given type, then begins
// a new item at the current position.
func (lx *lexer) emit(typ itemType) {
	lx.items <- item{typ, lx.current(), lx.line}
	lx.start = lx.pos
}
|  | ||||
// emitTrim is like emit, but strips leading and trailing whitespace
// from the item's value first.
func (lx *lexer) emitTrim(typ itemType) {
	lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
	lx.start = lx.pos
}
|  | ||||
// next consumes and returns the next rune, maintaining the line count
// and the history of rune widths used by backup. It returns eof at the
// end of input; calling next again after eof is a bug and panics.
func (lx *lexer) next() (r rune) {
	if lx.atEOF {
		panic("next called after EOF")
	}
	if lx.pos >= len(lx.input) {
		lx.atEOF = true
		return eof
	}

	if lx.input[lx.pos] == '\n' {
		lx.line++
	}
	// Shift the width history so backup can undo up to three runes.
	lx.prevWidths[2] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[0]
	if lx.nprev < 3 {
		lx.nprev++
	}
	r, w := utf8.DecodeRuneInString(lx.input[lx.pos:])
	lx.prevWidths[0] = w
	lx.pos += w
	return r
}
|  | ||||
// ignore skips over the pending input before this point: the next item
// emitted will begin at the current position.
func (lx *lexer) ignore() {
	lx.start = lx.pos
}
|  | ||||
// backup steps back one rune. It can be called at most three times
// between calls to next (the lexer stores the last three rune widths in
// prevWidths); backing up further panics. Backing up immediately after
// reading eof just clears the EOF flag without moving the position.
func (lx *lexer) backup() {
	if lx.atEOF {
		lx.atEOF = false
		return
	}
	if lx.nprev < 1 {
		panic("backed up too far")
	}
	w := lx.prevWidths[0]
	lx.prevWidths[0] = lx.prevWidths[1]
	lx.prevWidths[1] = lx.prevWidths[2]
	lx.nprev--
	lx.pos -= w
	if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
		lx.line--
	}
}
|  | ||||
// accept consumes the next rune if it's equal to `valid`; otherwise the
// input position is left unchanged. Reports whether the rune was consumed.
func (lx *lexer) accept(valid rune) bool {
	if lx.next() == valid {
		return true
	}
	lx.backup()
	return false
}
|  | ||||
// peek returns but does not consume the next rune in the input.
func (lx *lexer) peek() rune {
	r := lx.next()
	lx.backup()
	return r
}
|  | ||||
// skip consumes and discards input as long as the given predicate holds,
// leaving the next item to start at the first non-matching rune.
func (lx *lexer) skip(pred func(rune) bool) {
	for {
		r := lx.next()
		if pred(r) {
			continue
		}
		lx.backup()
		lx.ignore()
		return
	}
}
|  | ||||
// errorf stops all lexing by emitting an error item and returning `nil`
// as the next state. Note that any value that is a character is escaped
// if it's a special character (newlines, tabs, etc.) by the %q-style
// formats callers use.
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
	lx.items <- item{
		itemError,
		fmt.Sprintf(format, values...),
		lx.line,
	}
	return nil
}
|  | ||||
// lexTop consumes elements at the top level of TOML data: whitespace,
// comments, table headers, keys, and the final EOF.
func lexTop(lx *lexer) stateFn {
	r := lx.next()
	if isWhitespace(r) || isNL(r) {
		return lexSkip(lx, lexTop)
	}
	switch r {
	case commentStart:
		lx.push(lexTop)
		return lexCommentStart
	case tableStart:
		return lexTableStart
	case eof:
		// EOF mid-item means the input ended unexpectedly.
		if lx.pos > lx.start {
			return lx.errorf("unexpected EOF")
		}
		lx.emit(itemEOF)
		return nil
	}

	// At this point, the only valid item can be a key, so we back up
	// and let the key lexer do the rest.
	lx.backup()
	lx.push(lexTopEnd)
	return lexKeyStart
}
|  | ||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
// or a table.) It must see only whitespace, and will turn back to lexTop
// upon a newline. If it sees EOF, it will quit the lexer successfully.
func lexTopEnd(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == commentStart:
		// a comment will read to a newline for us.
		lx.push(lexTop)
		return lexCommentStart
	case isWhitespace(r):
		return lexTopEnd
	case isNL(r):
		lx.ignore()
		return lexTop
	case r == eof:
		lx.emit(itemEOF)
		return nil
	}
	return lx.errorf("expected a top-level item to end with a newline, "+
		"comment, or EOF, but got %q instead", r)
}
|  | ||||
// lexTableStart lexes the beginning of a table. Namely, it makes sure
// that it starts with a character other than '.' and ']'.
// It assumes that '[' has already been consumed.
// It also handles the case that this is an item in an array of tables,
// e.g., '[[name]]': a second '[' switches to array-table lexing.
func lexTableStart(lx *lexer) stateFn {
	if lx.peek() == arrayTableStart {
		lx.next()
		lx.emit(itemArrayTableStart)
		lx.push(lexArrayTableEnd)
	} else {
		lx.emit(itemTableStart)
		lx.push(lexTableEnd)
	}
	return lexTableNameStart
}
|  | ||||
// lexTableEnd emits the end of a '[name]' table header and resumes
// top-level lexing. The closing ']' was already consumed by the name
// lexer.
func lexTableEnd(lx *lexer) stateFn {
	lx.emit(itemTableEnd)
	return lexTopEnd
}
|  | ||||
// lexArrayTableEnd consumes the second ']' of a '[[name]]' header and
// emits the array-table end item; anything else is an error.
func lexArrayTableEnd(lx *lexer) stateFn {
	if r := lx.next(); r != arrayTableEnd {
		return lx.errorf("expected end of table array name delimiter %q, "+
			"but got %q instead", arrayTableEnd, r)
	}
	lx.emit(itemArrayTableEnd)
	return lexTopEnd
}
|  | ||||
// lexTableNameStart begins one dotted segment of a table name, which
// may be bare, basic-quoted, or raw-quoted. Empty segments are errors.
func lexTableNameStart(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.peek(); {
	case r == tableEnd || r == eof:
		return lx.errorf("unexpected end of table name " +
			"(table names cannot be empty)")
	case r == tableSep:
		return lx.errorf("unexpected table separator " +
			"(table names cannot be empty)")
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.push(lexTableNameEnd)
		return lexValue // reuse string lexing
	default:
		return lexBareTableName
	}
}
|  | ||||
// lexBareTableName lexes the name of a table. It assumes that at least one
// valid character for the table has already been read. It consumes bare-key
// characters until a delimiter, then emits the name as text.
func lexBareTableName(lx *lexer) stateFn {
	r := lx.next()
	if isBareKeyChar(r) {
		return lexBareTableName
	}
	lx.backup()
	lx.emit(itemText)
	return lexTableNameEnd
}
|  | ||||
// lexTableNameEnd reads the end of a piece of a table name, optionally
// consuming whitespace. A '.' continues with the next name segment; a
// ']' pops back to the pushed table-end state.
func lexTableNameEnd(lx *lexer) stateFn {
	lx.skip(isWhitespace)
	switch r := lx.next(); {
	case isWhitespace(r):
		return lexTableNameEnd
	case r == tableSep:
		lx.ignore()
		return lexTableNameStart
	case r == tableEnd:
		return lx.pop()
	default:
		return lx.errorf("expected '.' or ']' to end table name, "+
			"but got %q instead", r)
	}
}
|  | ||||
// lexKeyStart consumes a key name up until the first non-whitespace
// character; leading whitespace and newlines are ignored. Quoted keys
// are delegated to the string value lexer.
func lexKeyStart(lx *lexer) stateFn {
	r := lx.peek()
	switch {
	case r == keySep:
		return lx.errorf("unexpected key separator %q", keySep)
	case isWhitespace(r) || isNL(r):
		lx.next()
		return lexSkip(lx, lexKeyStart)
	case r == stringStart || r == rawStringStart:
		lx.ignore()
		lx.emit(itemKeyStart)
		lx.push(lexKeyEnd)
		return lexValue // reuse string lexing
	default:
		lx.ignore()
		lx.emit(itemKeyStart)
		return lexBareKey
	}
}
|  | ||||
// lexBareKey consumes the text of a bare key. Assumes that the first character
// (which is not whitespace) has not yet been consumed. The key ends at
// whitespace or the '=' separator; any other character is an error.
func lexBareKey(lx *lexer) stateFn {
	switch r := lx.next(); {
	case isBareKeyChar(r):
		return lexBareKey
	case isWhitespace(r):
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	case r == keySep:
		lx.backup()
		lx.emit(itemText)
		return lexKeyEnd
	default:
		return lx.errorf("bare keys cannot contain %q", r)
	}
}
|  | ||||
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
// separator). After the '=' it proceeds to lex the value.
func lexKeyEnd(lx *lexer) stateFn {
	switch r := lx.next(); {
	case r == keySep:
		return lexSkip(lx, lexValue)
	case isWhitespace(r):
		return lexSkip(lx, lexKeyEnd)
	default:
		return lx.errorf("expected key separator %q, but got %q instead",
			keySep, r)
	}
}
|  | ||||
// lexValue starts the consumption of a value anywhere a value is expected.
// lexValue will ignore whitespace.
// After a value is lexed, the last state on the stack is popped and returned.
func lexValue(lx *lexer) stateFn {
	// We allow whitespace to precede a value, but NOT newlines.
	// In array syntax, the array states are responsible for ignoring newlines.
	r := lx.next()
	switch {
	case isWhitespace(r):
		return lexSkip(lx, lexValue)
	case isDigit(r):
		lx.backup() // avoid an extra state and use the same as above
		return lexNumberOrDateStart
	}
	switch r {
	case arrayStart:
		lx.ignore()
		lx.emit(itemArray)
		return lexArrayValue
	case inlineTableStart:
		lx.ignore()
		lx.emit(itemInlineTableStart)
		return lexInlineTableValue
	case stringStart:
		// One '"' read; two more in a row means a multiline string.
		if lx.accept(stringStart) {
			if lx.accept(stringStart) {
				lx.ignore() // Ignore """
				return lexMultilineString
			}
			lx.backup()
		}
		lx.ignore() // ignore the '"'
		return lexString
	case rawStringStart:
		// Same triple-quote detection, for ''' raw strings.
		if lx.accept(rawStringStart) {
			if lx.accept(rawStringStart) {
				lx.ignore() // Ignore '''
				return lexMultilineRawString
			}
			lx.backup()
		}
		lx.ignore() // ignore the "'"
		return lexRawString
	case '+', '-':
		return lexNumberStart
	case '.': // special error case, be kind to users
		return lx.errorf("floats must start with a digit, not '.'")
	}
	if unicode.IsLetter(r) {
		// Be permissive here; lexBool will give a nice error if the
		// user wrote something like
		//   x = foo
		// (i.e. not 'true' or 'false' but is something else word-like.)
		lx.backup()
		return lexBool
	}
	return lx.errorf("expected value but found %q instead", r)
}
|  | ||||
| // lexArrayValue consumes one value in an array. It assumes that '[' or ',' | ||||
| // have already been consumed. All whitespace and newlines are ignored. | ||||
| func lexArrayValue(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case isWhitespace(r) || isNL(r): | ||||
| 		return lexSkip(lx, lexArrayValue) | ||||
| 	case r == commentStart: | ||||
| 		lx.push(lexArrayValue) | ||||
| 		return lexCommentStart | ||||
| 	case r == comma: | ||||
| 		return lx.errorf("unexpected comma") | ||||
| 	case r == arrayEnd: | ||||
| 		// NOTE(caleb): The spec isn't clear about whether you can have | ||||
| 		// a trailing comma or not, so we'll allow it. | ||||
| 		return lexArrayEnd | ||||
| 	} | ||||
|  | ||||
| 	lx.backup() | ||||
| 	lx.push(lexArrayValueEnd) | ||||
| 	return lexValue | ||||
| } | ||||
|  | ||||
| // lexArrayValueEnd consumes everything between the end of an array value and | ||||
| // the next value (or the end of the array): it ignores whitespace and newlines | ||||
| // and expects either a ',' or a ']'. | ||||
| func lexArrayValueEnd(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case isWhitespace(r) || isNL(r): | ||||
| 		return lexSkip(lx, lexArrayValueEnd) | ||||
| 	case r == commentStart: | ||||
| 		lx.push(lexArrayValueEnd) | ||||
| 		return lexCommentStart | ||||
| 	case r == comma: | ||||
| 		lx.ignore() | ||||
| 		return lexArrayValue // move on to the next value | ||||
| 	case r == arrayEnd: | ||||
| 		return lexArrayEnd | ||||
| 	} | ||||
| 	return lx.errorf( | ||||
| 		"expected a comma or array terminator %q, but got %q instead", | ||||
| 		arrayEnd, r, | ||||
| 	) | ||||
| } | ||||
|  | ||||
// lexArrayEnd finishes the lexing of an array.
// It assumes that a ']' has just been consumed.
func lexArrayEnd(lx *lexer) stateFn {
	lx.ignore() // drop the ']' itself; it carries no value text
	lx.emit(itemArrayEnd)
	return lx.pop() // resume the state that pushed us (value/array/table)
}
|  | ||||
| // lexInlineTableValue consumes one key/value pair in an inline table. | ||||
| // It assumes that '{' or ',' have already been consumed. Whitespace is ignored. | ||||
| func lexInlineTableValue(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case isWhitespace(r): | ||||
| 		return lexSkip(lx, lexInlineTableValue) | ||||
| 	case isNL(r): | ||||
| 		return lx.errorf("newlines not allowed within inline tables") | ||||
| 	case r == commentStart: | ||||
| 		lx.push(lexInlineTableValue) | ||||
| 		return lexCommentStart | ||||
| 	case r == comma: | ||||
| 		return lx.errorf("unexpected comma") | ||||
| 	case r == inlineTableEnd: | ||||
| 		return lexInlineTableEnd | ||||
| 	} | ||||
| 	lx.backup() | ||||
| 	lx.push(lexInlineTableValueEnd) | ||||
| 	return lexKeyStart | ||||
| } | ||||
|  | ||||
| // lexInlineTableValueEnd consumes everything between the end of an inline table | ||||
| // key/value pair and the next pair (or the end of the table): | ||||
| // it ignores whitespace and expects either a ',' or a '}'. | ||||
| func lexInlineTableValueEnd(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch { | ||||
| 	case isWhitespace(r): | ||||
| 		return lexSkip(lx, lexInlineTableValueEnd) | ||||
| 	case isNL(r): | ||||
| 		return lx.errorf("newlines not allowed within inline tables") | ||||
| 	case r == commentStart: | ||||
| 		lx.push(lexInlineTableValueEnd) | ||||
| 		return lexCommentStart | ||||
| 	case r == comma: | ||||
| 		lx.ignore() | ||||
| 		return lexInlineTableValue | ||||
| 	case r == inlineTableEnd: | ||||
| 		return lexInlineTableEnd | ||||
| 	} | ||||
| 	return lx.errorf("expected a comma or an inline table terminator %q, "+ | ||||
| 		"but got %q instead", inlineTableEnd, r) | ||||
| } | ||||
|  | ||||
// lexInlineTableEnd finishes the lexing of an inline table.
// It assumes that a '}' has just been consumed.
func lexInlineTableEnd(lx *lexer) stateFn {
	lx.ignore() // drop the '}' itself; it carries no value text
	lx.emit(itemInlineTableEnd)
	return lx.pop() // resume the state that pushed us
}
|  | ||||
// lexString consumes the inner contents of a basic string. It assumes that the
// beginning '"' has already been consumed and ignored.
func lexString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		// Basic strings are single-line; multiline needs `"""`.
		return lx.errorf("strings cannot contain newlines")
	case r == '\\':
		// Defer to the escape state, which pops back here when done.
		lx.push(lexString)
		return lexStringEscape
	case r == stringEnd:
		// Emit everything before the closing quote, then consume and
		// discard the quote itself.
		lx.backup()
		lx.emit(itemString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexString
}
|  | ||||
// lexMultilineString consumes the inner contents of a string. It assumes that
// the beginning '"""' has already been consumed and ignored.
func lexMultilineString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case '\\':
		return lexMultilineStringEscape
	case stringEnd:
		// A lone '"' (or a pair) is literal text; only '"""' terminates.
		if lx.accept(stringEnd) {
			if lx.accept(stringEnd) {
				// All three closing quotes are consumed. Back up past
				// them so the emitted item excludes the delimiter, then
				// re-consume and discard the delimiter.
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup() // only two quotes: the second is literal text
		}
	}
	return lexMultilineString
}
|  | ||||
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
// It assumes that the beginning "'" has already been consumed and ignored.
func lexRawString(lx *lexer) stateFn {
	r := lx.next()
	switch {
	case r == eof:
		return lx.errorf("unexpected EOF")
	case isNL(r):
		// Raw strings are single-line; multiline needs `'''`.
		return lx.errorf("strings cannot contain newlines")
	case r == rawStringEnd:
		// Emit everything before the closing quote, then consume and
		// discard the quote itself.
		lx.backup()
		lx.emit(itemRawString)
		lx.next()
		lx.ignore()
		return lx.pop()
	}
	return lexRawString
}
|  | ||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
// a string. It assumes that the beginning "'''" has already been consumed and
// ignored.
func lexMultilineRawString(lx *lexer) stateFn {
	switch lx.next() {
	case eof:
		return lx.errorf("unexpected EOF")
	case rawStringEnd:
		// A lone "'" (or a pair) is literal text; only "'''" terminates.
		if lx.accept(rawStringEnd) {
			if lx.accept(rawStringEnd) {
				// All three closing quotes are consumed. Back up past
				// them so the emitted item excludes the delimiter, then
				// re-consume and discard the delimiter.
				lx.backup()
				lx.backup()
				lx.backup()
				lx.emit(itemRawMultilineString)
				lx.next()
				lx.next()
				lx.next()
				lx.ignore()
				return lx.pop()
			}
			lx.backup() // only two quotes: the second is literal text
		}
	}
	return lexMultilineRawString
}
|  | ||||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
// preceding '\\' has already been consumed.
func lexMultilineStringEscape(lx *lexer) stateFn {
	// Handle the special case first: a backslash immediately followed by a
	// newline (a "line-ending backslash") is only valid in multiline strings.
	if isNL(lx.next()) {
		return lexMultilineString
	}
	// Otherwise it is an ordinary escape; delegate to lexStringEscape and
	// arrange for it to pop back into the multiline state.
	lx.backup()
	lx.push(lexMultilineString)
	return lexStringEscape(lx)
}
|  | ||||
| func lexStringEscape(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	switch r { | ||||
| 	case 'b': | ||||
| 		fallthrough | ||||
| 	case 't': | ||||
| 		fallthrough | ||||
| 	case 'n': | ||||
| 		fallthrough | ||||
| 	case 'f': | ||||
| 		fallthrough | ||||
| 	case 'r': | ||||
| 		fallthrough | ||||
| 	case '"': | ||||
| 		fallthrough | ||||
| 	case '\\': | ||||
| 		return lx.pop() | ||||
| 	case 'u': | ||||
| 		return lexShortUnicodeEscape | ||||
| 	case 'U': | ||||
| 		return lexLongUnicodeEscape | ||||
| 	} | ||||
| 	return lx.errorf("invalid escape character %q; only the following "+ | ||||
| 		"escape characters are allowed: "+ | ||||
| 		`\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) | ||||
| } | ||||
|  | ||||
| func lexShortUnicodeEscape(lx *lexer) stateFn { | ||||
| 	var r rune | ||||
| 	for i := 0; i < 4; i++ { | ||||
| 		r = lx.next() | ||||
| 		if !isHexadecimal(r) { | ||||
| 			return lx.errorf(`expected four hexadecimal digits after '\u', `+ | ||||
| 				"but got %q instead", lx.current()) | ||||
| 		} | ||||
| 	} | ||||
| 	return lx.pop() | ||||
| } | ||||
|  | ||||
| func lexLongUnicodeEscape(lx *lexer) stateFn { | ||||
| 	var r rune | ||||
| 	for i := 0; i < 8; i++ { | ||||
| 		r = lx.next() | ||||
| 		if !isHexadecimal(r) { | ||||
| 			return lx.errorf(`expected eight hexadecimal digits after '\U', `+ | ||||
| 				"but got %q instead", lx.current()) | ||||
| 		} | ||||
| 	} | ||||
| 	return lx.pop() | ||||
| } | ||||
|  | ||||
| // lexNumberOrDateStart consumes either an integer, a float, or datetime. | ||||
| func lexNumberOrDateStart(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	if isDigit(r) { | ||||
| 		return lexNumberOrDate | ||||
| 	} | ||||
| 	switch r { | ||||
| 	case '_': | ||||
| 		return lexNumber | ||||
| 	case 'e', 'E': | ||||
| 		return lexFloat | ||||
| 	case '.': | ||||
| 		return lx.errorf("floats must start with a digit, not '.'") | ||||
| 	} | ||||
| 	return lx.errorf("expected a digit but got %q", r) | ||||
| } | ||||
|  | ||||
| // lexNumberOrDate consumes either an integer, float or datetime. | ||||
| func lexNumberOrDate(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	if isDigit(r) { | ||||
| 		return lexNumberOrDate | ||||
| 	} | ||||
| 	switch r { | ||||
| 	case '-': | ||||
| 		return lexDatetime | ||||
| 	case '_': | ||||
| 		return lexNumber | ||||
| 	case '.', 'e', 'E': | ||||
| 		return lexFloat | ||||
| 	} | ||||
|  | ||||
| 	lx.backup() | ||||
| 	lx.emit(itemInteger) | ||||
| 	return lx.pop() | ||||
| } | ||||
|  | ||||
| // lexDatetime consumes a Datetime, to a first approximation. | ||||
| // The parser validates that it matches one of the accepted formats. | ||||
| func lexDatetime(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	if isDigit(r) { | ||||
| 		return lexDatetime | ||||
| 	} | ||||
| 	switch r { | ||||
| 	case '-', 'T', ':', '.', 'Z', '+': | ||||
| 		return lexDatetime | ||||
| 	} | ||||
|  | ||||
| 	lx.backup() | ||||
| 	lx.emit(itemDatetime) | ||||
| 	return lx.pop() | ||||
| } | ||||
|  | ||||
| // lexNumberStart consumes either an integer or a float. It assumes that a sign | ||||
| // has already been read, but that *no* digits have been consumed. | ||||
| // lexNumberStart will move to the appropriate integer or float states. | ||||
| func lexNumberStart(lx *lexer) stateFn { | ||||
| 	// We MUST see a digit. Even floats have to start with a digit. | ||||
| 	r := lx.next() | ||||
| 	if !isDigit(r) { | ||||
| 		if r == '.' { | ||||
| 			return lx.errorf("floats must start with a digit, not '.'") | ||||
| 		} | ||||
| 		return lx.errorf("expected a digit but got %q", r) | ||||
| 	} | ||||
| 	return lexNumber | ||||
| } | ||||
|  | ||||
| // lexNumber consumes an integer or a float after seeing the first digit. | ||||
| func lexNumber(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	if isDigit(r) { | ||||
| 		return lexNumber | ||||
| 	} | ||||
| 	switch r { | ||||
| 	case '_': | ||||
| 		return lexNumber | ||||
| 	case '.', 'e', 'E': | ||||
| 		return lexFloat | ||||
| 	} | ||||
|  | ||||
| 	lx.backup() | ||||
| 	lx.emit(itemInteger) | ||||
| 	return lx.pop() | ||||
| } | ||||
|  | ||||
| // lexFloat consumes the elements of a float. It allows any sequence of | ||||
| // float-like characters, so floats emitted by the lexer are only a first | ||||
| // approximation and must be validated by the parser. | ||||
| func lexFloat(lx *lexer) stateFn { | ||||
| 	r := lx.next() | ||||
| 	if isDigit(r) { | ||||
| 		return lexFloat | ||||
| 	} | ||||
| 	switch r { | ||||
| 	case '_', '.', '-', '+', 'e', 'E': | ||||
| 		return lexFloat | ||||
| 	} | ||||
|  | ||||
| 	lx.backup() | ||||
| 	lx.emit(itemFloat) | ||||
| 	return lx.pop() | ||||
| } | ||||
|  | ||||
| // lexBool consumes a bool string: 'true' or 'false. | ||||
| func lexBool(lx *lexer) stateFn { | ||||
| 	var rs []rune | ||||
| 	for { | ||||
| 		r := lx.next() | ||||
| 		if !unicode.IsLetter(r) { | ||||
| 			lx.backup() | ||||
| 			break | ||||
| 		} | ||||
| 		rs = append(rs, r) | ||||
| 	} | ||||
| 	s := string(rs) | ||||
| 	switch s { | ||||
| 	case "true", "false": | ||||
| 		lx.emit(itemBool) | ||||
| 		return lx.pop() | ||||
| 	} | ||||
| 	return lx.errorf("expected value but found %q instead", s) | ||||
| } | ||||
|  | ||||
// lexCommentStart begins the lexing of a comment. It will emit
// itemCommentStart and consume no characters, passing control to lexComment.
func lexCommentStart(lx *lexer) stateFn {
	lx.ignore() // drop the '#' marker; it is not part of the comment text
	lx.emit(itemCommentStart)
	return lexComment
}
|  | ||||
| // lexComment lexes an entire comment. It assumes that '#' has been consumed. | ||||
| // It will consume *up to* the first newline character, and pass control | ||||
| // back to the last state on the stack. | ||||
| func lexComment(lx *lexer) stateFn { | ||||
| 	r := lx.peek() | ||||
| 	if isNL(r) || r == eof { | ||||
| 		lx.emit(itemText) | ||||
| 		return lx.pop() | ||||
| 	} | ||||
| 	lx.next() | ||||
| 	return lexComment | ||||
| } | ||||
|  | ||||
| // lexSkip ignores all slurped input and moves on to the next state. | ||||
| func lexSkip(lx *lexer, nextState stateFn) stateFn { | ||||
| 	return func(lx *lexer) stateFn { | ||||
| 		lx.ignore() | ||||
| 		return nextState | ||||
| 	} | ||||
| } | ||||
|  | ||||
// isWhitespace reports whether r is a whitespace character according
// to the spec (tab or space only).
func isWhitespace(r rune) bool {
	switch r {
	case '\t', ' ':
		return true
	}
	return false
}
|  | ||||
// isNL reports whether r is a newline character ('\n' or '\r').
func isNL(r rune) bool {
	switch r {
	case '\n', '\r':
		return true
	}
	return false
}
|  | ||||
// isDigit reports whether r is an ASCII decimal digit.
func isDigit(r rune) bool {
	return '0' <= r && r <= '9'
}
|  | ||||
// isHexadecimal reports whether r is an ASCII hexadecimal digit
// (0-9, a-f, or A-F).
func isHexadecimal(r rune) bool {
	switch {
	case '0' <= r && r <= '9':
		return true
	case 'a' <= r && r <= 'f':
		return true
	case 'A' <= r && r <= 'F':
		return true
	}
	return false
}
|  | ||||
// isBareKeyChar reports whether r may appear in a bare (unquoted) TOML key:
// ASCII letters, digits, underscore, and dash.
func isBareKeyChar(r rune) bool {
	switch {
	case 'A' <= r && r <= 'Z', 'a' <= r && r <= 'z', '0' <= r && r <= '9':
		return true
	case r == '_', r == '-':
		return true
	}
	return false
}
|  | ||||
| func (itype itemType) String() string { | ||||
| 	switch itype { | ||||
| 	case itemError: | ||||
| 		return "Error" | ||||
| 	case itemNIL: | ||||
| 		return "NIL" | ||||
| 	case itemEOF: | ||||
| 		return "EOF" | ||||
| 	case itemText: | ||||
| 		return "Text" | ||||
| 	case itemString, itemRawString, itemMultilineString, itemRawMultilineString: | ||||
| 		return "String" | ||||
| 	case itemBool: | ||||
| 		return "Bool" | ||||
| 	case itemInteger: | ||||
| 		return "Integer" | ||||
| 	case itemFloat: | ||||
| 		return "Float" | ||||
| 	case itemDatetime: | ||||
| 		return "DateTime" | ||||
| 	case itemTableStart: | ||||
| 		return "TableStart" | ||||
| 	case itemTableEnd: | ||||
| 		return "TableEnd" | ||||
| 	case itemKeyStart: | ||||
| 		return "KeyStart" | ||||
| 	case itemArray: | ||||
| 		return "Array" | ||||
| 	case itemArrayEnd: | ||||
| 		return "ArrayEnd" | ||||
| 	case itemCommentStart: | ||||
| 		return "CommentStart" | ||||
| 	} | ||||
| 	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) | ||||
| } | ||||
|  | ||||
| func (item item) String() string { | ||||
| 	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) | ||||
| } | ||||
							
								
								
									
										592
									
								
								vendor/github.com/BurntSushi/toml/parse.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										592
									
								
								vendor/github.com/BurntSushi/toml/parse.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,592 @@ | ||||
| package toml | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| 	"unicode" | ||||
| 	"unicode/utf8" | ||||
| ) | ||||
|  | ||||
// parser converts the lexer's item stream into a nested
// map[string]interface{} (mapping) plus per-key TOML type info (types).
type parser struct {
	mapping map[string]interface{} // decoded key/value tree
	types   map[string]tomlType    // TOML type for each fully-qualified key
	lx      *lexer

	// A list of keys in the order that they appear in the TOML data.
	ordered []Key

	// the full key for the current hash in scope
	context Key

	// the base key name for everything except hashes
	currentKey string

	// rough approximation of line number
	approxLine int

	// A map of 'key.group.names' to whether they were created implicitly.
	implicits map[string]bool
}
|  | ||||
// parseError is the panic payload used internally by the parser; parse()
// recovers it and converts it into an ordinary error return.
type parseError string

// Error implements the error interface.
func (pe parseError) Error() string {
	return string(pe)
}
|  | ||||
// parse runs the parser over data and returns the populated parser.
// Parse errors are raised internally via panic(parseError) and converted
// back into an error by the deferred recover; any other panic value is a
// genuine bug and is re-raised.
func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(parseError); ok {
				return
			}
			panic(r) // not one of ours: propagate
		}
	}()

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]bool),
	}
	// Drain the lexer one top-level item at a time until EOF.
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}
|  | ||||
| func (p *parser) panicf(format string, v ...interface{}) { | ||||
| 	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", | ||||
| 		p.approxLine, p.current(), fmt.Sprintf(format, v...)) | ||||
| 	panic(parseError(msg)) | ||||
| } | ||||
|  | ||||
// next returns the next item from the lexer, aborting the parse (via
// panicf, which parse() recovers) when the lexer reports an error item.
func (p *parser) next() item {
	it := p.lx.nextItem()
	if it.typ == itemError {
		p.panicf("%s", it.val)
	}
	return it
}
|  | ||||
// bug panics with a plain string (not a parseError), so it is NOT caught by
// parse()'s recover: it signals a programmer error, not bad user input.
func (p *parser) bug(format string, v ...interface{}) {
	panic(fmt.Sprintf("BUG: "+format+"\n\n", v...))
}
|  | ||||
// expect consumes the next item and asserts (via bug) that it has type typ.
func (p *parser) expect(typ itemType) item {
	it := p.next()
	p.assertEqual(typ, it.typ)
	return it
}
|  | ||||
| func (p *parser) assertEqual(expected, got itemType) { | ||||
| 	if expected != got { | ||||
| 		p.bug("Expected '%s' but got '%s'.", expected, got) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// topLevel dispatches on one top-level item: a comment, a [table] header,
// an [[array-of-tables]] header, or a key = value pair.
func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart:
		p.approxLine = item.line
		p.expect(itemText) // the comment body; discarded
	case itemTableStart:
		kg := p.next()
		p.approxLine = kg.line

		// Collect the dotted key parts until the closing bracket.
		var key Key
		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemTableEnd, kg.typ)

		p.establishContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart:
		kg := p.next()
		p.approxLine = kg.line

		// Same as above, but for [[key]] (array-of-tables) headers.
		var key Key
		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemArrayTableEnd, kg.typ)

		p.establishContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart:
		kname := p.next()
		p.approxLine = kname.line
		p.currentKey = p.keyString(kname)

		// Decode the value, record it and its type in the current context.
		val, typ := p.value(p.next())
		p.setValue(p.currentKey, val)
		p.setType(p.currentKey, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}
|  | ||||
| // Gets a string for a key (or part of a key in a table name). | ||||
| func (p *parser) keyString(it item) string { | ||||
| 	switch it.typ { | ||||
| 	case itemText: | ||||
| 		return it.val | ||||
| 	case itemString, itemMultilineString, | ||||
| 		itemRawString, itemRawMultilineString: | ||||
| 		s, _ := p.value(it) | ||||
| 		return s.(string) | ||||
| 	default: | ||||
| 		p.bug("Unexpected key type: %s", it.typ) | ||||
| 		panic("unreachable") | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // value translates an expected value from the lexer into a Go value wrapped | ||||
| // as an empty interface. | ||||
| func (p *parser) value(it item) (interface{}, tomlType) { | ||||
| 	switch it.typ { | ||||
| 	case itemString: | ||||
| 		return p.replaceEscapes(it.val), p.typeOfPrimitive(it) | ||||
| 	case itemMultilineString: | ||||
| 		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) | ||||
| 		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) | ||||
| 	case itemRawString: | ||||
| 		return it.val, p.typeOfPrimitive(it) | ||||
| 	case itemRawMultilineString: | ||||
| 		return stripFirstNewline(it.val), p.typeOfPrimitive(it) | ||||
| 	case itemBool: | ||||
| 		switch it.val { | ||||
| 		case "true": | ||||
| 			return true, p.typeOfPrimitive(it) | ||||
| 		case "false": | ||||
| 			return false, p.typeOfPrimitive(it) | ||||
| 		} | ||||
| 		p.bug("Expected boolean value, but got '%s'.", it.val) | ||||
| 	case itemInteger: | ||||
| 		if !numUnderscoresOK(it.val) { | ||||
| 			p.panicf("Invalid integer %q: underscores must be surrounded by digits", | ||||
| 				it.val) | ||||
| 		} | ||||
| 		val := strings.Replace(it.val, "_", "", -1) | ||||
| 		num, err := strconv.ParseInt(val, 10, 64) | ||||
| 		if err != nil { | ||||
| 			// Distinguish integer values. Normally, it'd be a bug if the lexer | ||||
| 			// provides an invalid integer, but it's possible that the number is | ||||
| 			// out of range of valid values (which the lexer cannot determine). | ||||
| 			// So mark the former as a bug but the latter as a legitimate user | ||||
| 			// error. | ||||
| 			if e, ok := err.(*strconv.NumError); ok && | ||||
| 				e.Err == strconv.ErrRange { | ||||
|  | ||||
| 				p.panicf("Integer '%s' is out of the range of 64-bit "+ | ||||
| 					"signed integers.", it.val) | ||||
| 			} else { | ||||
| 				p.bug("Expected integer value, but got '%s'.", it.val) | ||||
| 			} | ||||
| 		} | ||||
| 		return num, p.typeOfPrimitive(it) | ||||
| 	case itemFloat: | ||||
| 		parts := strings.FieldsFunc(it.val, func(r rune) bool { | ||||
| 			switch r { | ||||
| 			case '.', 'e', 'E': | ||||
| 				return true | ||||
| 			} | ||||
| 			return false | ||||
| 		}) | ||||
| 		for _, part := range parts { | ||||
| 			if !numUnderscoresOK(part) { | ||||
| 				p.panicf("Invalid float %q: underscores must be "+ | ||||
| 					"surrounded by digits", it.val) | ||||
| 			} | ||||
| 		} | ||||
| 		if !numPeriodsOK(it.val) { | ||||
| 			// As a special case, numbers like '123.' or '1.e2', | ||||
| 			// which are valid as far as Go/strconv are concerned, | ||||
| 			// must be rejected because TOML says that a fractional | ||||
| 			// part consists of '.' followed by 1+ digits. | ||||
| 			p.panicf("Invalid float %q: '.' must be followed "+ | ||||
| 				"by one or more digits", it.val) | ||||
| 		} | ||||
| 		val := strings.Replace(it.val, "_", "", -1) | ||||
| 		num, err := strconv.ParseFloat(val, 64) | ||||
| 		if err != nil { | ||||
| 			if e, ok := err.(*strconv.NumError); ok && | ||||
| 				e.Err == strconv.ErrRange { | ||||
|  | ||||
| 				p.panicf("Float '%s' is out of the range of 64-bit "+ | ||||
| 					"IEEE-754 floating-point numbers.", it.val) | ||||
| 			} else { | ||||
| 				p.panicf("Invalid float value: %q", it.val) | ||||
| 			} | ||||
| 		} | ||||
| 		return num, p.typeOfPrimitive(it) | ||||
| 	case itemDatetime: | ||||
| 		var t time.Time | ||||
| 		var ok bool | ||||
| 		var err error | ||||
| 		for _, format := range []string{ | ||||
| 			"2006-01-02T15:04:05Z07:00", | ||||
| 			"2006-01-02T15:04:05", | ||||
| 			"2006-01-02", | ||||
| 		} { | ||||
| 			t, err = time.ParseInLocation(format, it.val, time.Local) | ||||
| 			if err == nil { | ||||
| 				ok = true | ||||
| 				break | ||||
| 			} | ||||
| 		} | ||||
| 		if !ok { | ||||
| 			p.panicf("Invalid TOML Datetime: %q.", it.val) | ||||
| 		} | ||||
| 		return t, p.typeOfPrimitive(it) | ||||
| 	case itemArray: | ||||
| 		array := make([]interface{}, 0) | ||||
| 		types := make([]tomlType, 0) | ||||
|  | ||||
| 		for it = p.next(); it.typ != itemArrayEnd; it = p.next() { | ||||
| 			if it.typ == itemCommentStart { | ||||
| 				p.expect(itemText) | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			val, typ := p.value(it) | ||||
| 			array = append(array, val) | ||||
| 			types = append(types, typ) | ||||
| 		} | ||||
| 		return array, p.typeOfArray(types) | ||||
| 	case itemInlineTableStart: | ||||
| 		var ( | ||||
| 			hash         = make(map[string]interface{}) | ||||
| 			outerContext = p.context | ||||
| 			outerKey     = p.currentKey | ||||
| 		) | ||||
|  | ||||
| 		p.context = append(p.context, p.currentKey) | ||||
| 		p.currentKey = "" | ||||
| 		for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { | ||||
| 			if it.typ != itemKeyStart { | ||||
| 				p.bug("Expected key start but instead found %q, around line %d", | ||||
| 					it.val, p.approxLine) | ||||
| 			} | ||||
| 			if it.typ == itemCommentStart { | ||||
| 				p.expect(itemText) | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			// retrieve key | ||||
| 			k := p.next() | ||||
| 			p.approxLine = k.line | ||||
| 			kname := p.keyString(k) | ||||
|  | ||||
| 			// retrieve value | ||||
| 			p.currentKey = kname | ||||
| 			val, typ := p.value(p.next()) | ||||
| 			// make sure we keep metadata up to date | ||||
| 			p.setType(kname, typ) | ||||
| 			p.ordered = append(p.ordered, p.context.add(p.currentKey)) | ||||
| 			hash[kname] = val | ||||
| 		} | ||||
| 		p.context = outerContext | ||||
| 		p.currentKey = outerKey | ||||
| 		return hash, tomlHash | ||||
| 	} | ||||
| 	p.bug("Unexpected value type: %s", it.typ) | ||||
| 	panic("unreachable") | ||||
| } | ||||
|  | ||||
// numUnderscoresOK reports whether every underscore in s has a
// non-underscore character on both sides (per the TOML number grammar).
// The empty string and strings with leading, trailing, or doubled
// underscores are rejected.
func numUnderscoresOK(s string) bool {
	prevOK := false // true when the previous rune may precede '_'
	for _, r := range s {
		if r != '_' {
			prevOK = true
			continue
		}
		if !prevOK {
			return false // leading or doubled underscore
		}
		prevOK = false
	}
	// A trailing underscore (or empty input) leaves prevOK false.
	return prevOK
}
|  | ||||
// numPeriodsOK reports whether every period in s is followed by a digit
// (which also rejects a trailing period).
func numPeriodsOK(s string) bool {
	afterPeriod := false
	for _, r := range s {
		if afterPeriod && (r < '0' || r > '9') {
			return false
		}
		afterPeriod = r == '.'
	}
	return !afterPeriod
}
|  | ||||
// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			// The intermediate key names a non-table value: conflict.
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 5)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as "+
				"an array.", keyContext)
		}
	} else {
		// A plain [table]: create its hash via setValue, which also
		// enforces the duplicate-key rules.
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	// The new table itself becomes part of the context for subsequent keys.
	p.context = append(p.context, key[len(key)-1])
}
|  | ||||
// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, account for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var tmpHash interface{}
	var ok bool

	// Walk from the top-level mapping down through p.context to find the
	// hash that should receive the key.
	hash := p.mapping
	keyContext := make(Key, 0)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.bug("Expected hash to have type 'map[string]interface{}', but "+
				"it has '%T' instead.", tmpHash)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Typically, if the given key has already been set, then we have
		// to raise an error since duplicate keys are disallowed. However,
		// it's possible that a key was previously defined implicitly. In this
		// case, it is allowed to be redefined concretely. (See the
		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
		//
		// But we have to make sure to stop marking it as an implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}
	hash[key] = value
}
|  | ||||
| // setType sets the type of a particular value at a given key. | ||||
| // It should be called immediately AFTER setValue. | ||||
| // | ||||
| // Note that if `key` is empty, then the type given will be applied to the | ||||
| // current context (which is either a table or an array of tables). | ||||
| func (p *parser) setType(key string, typ tomlType) { | ||||
| 	keyContext := make(Key, 0, len(p.context)+1) | ||||
| 	for _, k := range p.context { | ||||
| 		keyContext = append(keyContext, k) | ||||
| 	} | ||||
| 	if len(key) > 0 { // allow type setting for hashes | ||||
| 		keyContext = append(keyContext, key) | ||||
| 	} | ||||
| 	p.types[keyContext.String()] = typ | ||||
| } | ||||
|  | ||||
// addImplicit sets the given Key as having been created implicitly.
// Such keys may later be redefined concretely exactly once; see
// setValue, which consults and clears this marker.
func (p *parser) addImplicit(key Key) {
	p.implicits[key.String()] = true
}

// removeImplicit stops tagging the given key as having been implicitly
// created. After this, redefining the key is an error.
func (p *parser) removeImplicit(key Key) {
	p.implicits[key.String()] = false
}

// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
	return p.implicits[key.String()]
}
|  | ||||
| // current returns the full key name of the current context. | ||||
| func (p *parser) current() string { | ||||
| 	if len(p.currentKey) == 0 { | ||||
| 		return p.context.String() | ||||
| 	} | ||||
| 	if len(p.context) == 0 { | ||||
| 		return p.currentKey | ||||
| 	} | ||||
| 	return fmt.Sprintf("%s.%s", p.context, p.currentKey) | ||||
| } | ||||
|  | ||||
// stripFirstNewline drops a single leading newline from s, if present.
// (TOML ignores a newline immediately following the opening delimiter
// of a multiline string.)
func stripFirstNewline(s string) string {
	if len(s) > 0 && s[0] == '\n' {
		return s[1:]
	}
	return s
}
|  | ||||
// stripEscapedWhitespace implements TOML's "line-ending backslash": a
// backslash immediately followed by a newline erases that newline and
// all whitespace at the start of the following text.
func stripEscapedWhitespace(s string) string {
	parts := strings.Split(s, "\\\n")
	for i, part := range parts {
		if i == 0 {
			continue // text before the first escaped newline is untouched
		}
		parts[i] = strings.TrimLeftFunc(part, unicode.IsSpace)
	}
	return strings.Join(parts, "")
}
|  | ||||
// replaceEscapes decodes the backslash escape sequences of a basic TOML
// string, returning the text with each escape replaced by the rune it
// denotes. It relies on the lexer having already validated every escape;
// an unknown escape here is reported as an internal bug, not a user error.
func (p *parser) replaceEscapes(str string) string {
	var replaced []rune
	s := []byte(str)
	r := 0 // byte index into s
	for r < len(s) {
		if s[r] != '\\' {
			// Ordinary character: copy the (possibly multi-byte) rune.
			c, size := utf8.DecodeRune(s[r:])
			r += size
			replaced = append(replaced, c)
			continue
		}
		// s[r] is a backslash; advance to the escape letter.
		r += 1
		if r >= len(s) {
			p.bug("Escape sequence at end of string.")
			return ""
		}
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008)) // backspace
			r += 1
		case 't':
			replaced = append(replaced, rune(0x0009)) // tab
			r += 1
		case 'n':
			replaced = append(replaced, rune(0x000A)) // line feed
			r += 1
		case 'f':
			replaced = append(replaced, rune(0x000C)) // form feed
			r += 1
		case 'r':
			replaced = append(replaced, rune(0x000D)) // carriage return
			r += 1
		case '"':
			replaced = append(replaced, rune(0x0022)) // double quote
			r += 1
		case '\\':
			replaced = append(replaced, rune(0x005C)) // backslash
			r += 1
		case 'u':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
			replaced = append(replaced, escaped)
			r += 5
		case 'U':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+9). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
			replaced = append(replaced, escaped)
			r += 9
		}
	}
	return string(replaced)
}
|  | ||||
| func (p *parser) asciiEscapeToUnicode(bs []byte) rune { | ||||
| 	s := string(bs) | ||||
| 	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) | ||||
| 	if err != nil { | ||||
| 		p.bug("Could not parse '%s' as a hexadecimal number, but the "+ | ||||
| 			"lexer claims it's OK: %s", s, err) | ||||
| 	} | ||||
| 	if !utf8.ValidRune(rune(hex)) { | ||||
| 		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) | ||||
| 	} | ||||
| 	return rune(hex) | ||||
| } | ||||
|  | ||||
| func isStringType(ty itemType) bool { | ||||
| 	return ty == itemString || ty == itemMultilineString || | ||||
| 		ty == itemRawString || ty == itemRawMultilineString | ||||
| } | ||||
							
								
								
									
										1
									
								
								vendor/github.com/BurntSushi/toml/session.vim
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1
									
								
								vendor/github.com/BurntSushi/toml/session.vim
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1 @@ | ||||
| au BufWritePost *.go silent!make tags > /dev/null 2>&1 | ||||
							
								
								
									
										91
									
								
								vendor/github.com/BurntSushi/toml/type_check.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										91
									
								
								vendor/github.com/BurntSushi/toml/type_check.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,91 @@ | ||||
| package toml | ||||
|  | ||||
// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
//
// typeString returns the canonical name of the type (e.g. "Integer");
// two tomlTypes are considered equal exactly when these names match
// (see typeEqual).
type tomlType interface {
	typeString() string
}
|  | ||||
| // typeEqual accepts any two types and returns true if they are equal. | ||||
| func typeEqual(t1, t2 tomlType) bool { | ||||
| 	if t1 == nil || t2 == nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	return t1.typeString() == t2.typeString() | ||||
| } | ||||
|  | ||||
| func typeIsHash(t tomlType) bool { | ||||
| 	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) | ||||
| } | ||||
|  | ||||
// tomlBaseType is a trivial tomlType whose string value is the type
// name itself; it backs the fixed set of base TOML types declared below.
type tomlBaseType string

// typeString implements tomlType.
func (btype tomlBaseType) typeString() string {
	return string(btype)
}

// String makes tomlBaseType printable, e.g. in error messages.
func (btype tomlBaseType) String() string {
	return btype.typeString()
}
|  | ||||
// The canonical instances of each base TOML type. Compare against these
// with typeEqual rather than ==, since tomlType is an interface.
var (
	tomlInteger   tomlBaseType = "Integer"
	tomlFloat     tomlBaseType = "Float"
	tomlDatetime  tomlBaseType = "Datetime"
	tomlString    tomlBaseType = "String"
	tomlBool      tomlBaseType = "Bool"
	tomlArray     tomlBaseType = "Array"
	tomlHash      tomlBaseType = "Hash"
	tomlArrayHash tomlBaseType = "ArrayHash"
)
|  | ||||
| // typeOfPrimitive returns a tomlType of any primitive value in TOML. | ||||
| // Primitive values are: Integer, Float, Datetime, String and Bool. | ||||
| // | ||||
| // Passing a lexer item other than the following will cause a BUG message | ||||
| // to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. | ||||
| func (p *parser) typeOfPrimitive(lexItem item) tomlType { | ||||
| 	switch lexItem.typ { | ||||
| 	case itemInteger: | ||||
| 		return tomlInteger | ||||
| 	case itemFloat: | ||||
| 		return tomlFloat | ||||
| 	case itemDatetime: | ||||
| 		return tomlDatetime | ||||
| 	case itemString: | ||||
| 		return tomlString | ||||
| 	case itemMultilineString: | ||||
| 		return tomlString | ||||
| 	case itemRawString: | ||||
| 		return tomlString | ||||
| 	case itemRawMultilineString: | ||||
| 		return tomlString | ||||
| 	case itemBool: | ||||
| 		return tomlBool | ||||
| 	} | ||||
| 	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) | ||||
| 	panic("unreachable") | ||||
| } | ||||
|  | ||||
| // typeOfArray returns a tomlType for an array given a list of types of its | ||||
| // values. | ||||
| // | ||||
| // In the current spec, if an array is homogeneous, then its type is always | ||||
| // "Array". If the array is not homogeneous, an error is generated. | ||||
| func (p *parser) typeOfArray(types []tomlType) tomlType { | ||||
| 	// Empty arrays are cool. | ||||
| 	if len(types) == 0 { | ||||
| 		return tomlArray | ||||
| 	} | ||||
|  | ||||
| 	theType := types[0] | ||||
| 	for _, t := range types[1:] { | ||||
| 		if !typeEqual(theType, t) { | ||||
| 			p.panicf("Array contains values of type '%s' and '%s', but "+ | ||||
| 				"arrays must be homogeneous.", theType, t) | ||||
| 		} | ||||
| 	} | ||||
| 	return tomlArray | ||||
| } | ||||
							
								
								
									
										242
									
								
								vendor/github.com/BurntSushi/toml/type_fields.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										242
									
								
								vendor/github.com/BurntSushi/toml/type_fields.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,242 @@ | ||||
| package toml | ||||
|  | ||||
| // Struct field handling is adapted from code in encoding/json: | ||||
| // | ||||
| // Copyright 2010 The Go Authors.  All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the Go distribution. | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"sort" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
// A field represents a single field found in a struct.
//
// index records the path of struct-field indices from the root type
// down to this field, so fields reached through embedded (anonymous)
// structs can be addressed; its length is the embedding depth.
type field struct {
	name  string       // the name of the field (`toml` tag included)
	tag   bool         // whether field has a `toml` tag
	index []int        // represents the depth of an anonymous field
	typ   reflect.Type // the type of the field
}
|  | ||||
| // byName sorts field by name, breaking ties with depth, | ||||
| // then breaking ties with "name came from toml tag", then | ||||
| // breaking ties with index sequence. | ||||
| type byName []field | ||||
|  | ||||
| func (x byName) Len() int { return len(x) } | ||||
|  | ||||
| func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } | ||||
|  | ||||
| func (x byName) Less(i, j int) bool { | ||||
| 	if x[i].name != x[j].name { | ||||
| 		return x[i].name < x[j].name | ||||
| 	} | ||||
| 	if len(x[i].index) != len(x[j].index) { | ||||
| 		return len(x[i].index) < len(x[j].index) | ||||
| 	} | ||||
| 	if x[i].tag != x[j].tag { | ||||
| 		return x[i].tag | ||||
| 	} | ||||
| 	return byIndex(x).Less(i, j) | ||||
| } | ||||
|  | ||||
| // byIndex sorts field by index sequence. | ||||
| type byIndex []field | ||||
|  | ||||
| func (x byIndex) Len() int { return len(x) } | ||||
|  | ||||
| func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } | ||||
|  | ||||
| func (x byIndex) Less(i, j int) bool { | ||||
| 	for k, xik := range x[i].index { | ||||
| 		if k >= len(x[j].index) { | ||||
| 			return false | ||||
| 		} | ||||
| 		if xik != x[j].index[k] { | ||||
| 			return xik < x[j].index[k] | ||||
| 		} | ||||
| 	} | ||||
| 	return len(x[i].index) < len(x[j].index) | ||||
| } | ||||
|  | ||||
// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
//
// The result is sorted by index sequence, with fields hidden by Go's
// rules for embedded fields (unless promoted by a `toml` tag) already
// removed. Adapted from encoding/json; see the file header.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	// A count > 1 marks a type embedded more than once at one level;
	// its fields must be duplicated so they annihilate below.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				opts := getOptions(sf.Tag)
				if opts.skip {
					continue
				}
				// index is f's path extended with this field's position.
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := opts.name != ""
					name := opts.name
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					f := field{name: ft.Name(), index: index, typ: ft}
					next = append(next, f)
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with TOML tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}
|  | ||||
| // dominantField looks through the fields, all of which are known to | ||||
| // have the same name, to find the single field that dominates the | ||||
| // others using Go's embedding rules, modified by the presence of | ||||
| // TOML tags. If there are multiple top-level fields, the boolean | ||||
| // will be false: This condition is an error in Go and we skip all | ||||
| // the fields. | ||||
| func dominantField(fields []field) (field, bool) { | ||||
| 	// The fields are sorted in increasing index-length order. The winner | ||||
| 	// must therefore be one with the shortest index length. Drop all | ||||
| 	// longer entries, which is easy: just truncate the slice. | ||||
| 	length := len(fields[0].index) | ||||
| 	tagged := -1 // Index of first tagged field. | ||||
| 	for i, f := range fields { | ||||
| 		if len(f.index) > length { | ||||
| 			fields = fields[:i] | ||||
| 			break | ||||
| 		} | ||||
| 		if f.tag { | ||||
| 			if tagged >= 0 { | ||||
| 				// Multiple tagged fields at the same level: conflict. | ||||
| 				// Return no field. | ||||
| 				return field{}, false | ||||
| 			} | ||||
| 			tagged = i | ||||
| 		} | ||||
| 	} | ||||
| 	if tagged >= 0 { | ||||
| 		return fields[tagged], true | ||||
| 	} | ||||
| 	// All remaining fields have the same length. If there's more than one, | ||||
| 	// we have a conflict (two fields named "X" at the same level) and we | ||||
| 	// return no field. | ||||
| 	if len(fields) > 1 { | ||||
| 		return field{}, false | ||||
| 	} | ||||
| 	return fields[0], true | ||||
| } | ||||
|  | ||||
// fieldCache memoizes typeFields results per struct type; the RWMutex
// lets concurrent readers share it (see cachedTypeFields).
var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}
|  | ||||
| // cachedTypeFields is like typeFields but uses a cache to avoid repeated work. | ||||
| func cachedTypeFields(t reflect.Type) []field { | ||||
| 	fieldCache.RLock() | ||||
| 	f := fieldCache.m[t] | ||||
| 	fieldCache.RUnlock() | ||||
| 	if f != nil { | ||||
| 		return f | ||||
| 	} | ||||
|  | ||||
| 	// Compute fields without lock. | ||||
| 	// Might duplicate effort but won't hold other computations back. | ||||
| 	f = typeFields(t) | ||||
| 	if f == nil { | ||||
| 		f = []field{} | ||||
| 	} | ||||
|  | ||||
| 	fieldCache.Lock() | ||||
| 	if fieldCache.m == nil { | ||||
| 		fieldCache.m = map[reflect.Type][]field{} | ||||
| 	} | ||||
| 	fieldCache.m[t] = f | ||||
| 	fieldCache.Unlock() | ||||
| 	return f | ||||
| } | ||||
							
								
								
									
										30
									
								
								vendor/golang.org/x/tools/go/analysis/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										30
									
								
								vendor/golang.org/x/tools/go/analysis/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,30 @@ | ||||
| load("@io_bazel_rules_go//go:def.bzl", "go_library") | ||||
|  | ||||
| go_library( | ||||
|     name = "go_default_library", | ||||
|     srcs = [ | ||||
|         "analysis.go", | ||||
|         "doc.go", | ||||
|         "validate.go", | ||||
|     ], | ||||
|     importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/analysis", | ||||
|     importpath = "golang.org/x/tools/go/analysis", | ||||
|     visibility = ["//visibility:public"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "package-srcs", | ||||
|     srcs = glob(["**"]), | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:private"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "all-srcs", | ||||
|     srcs = [ | ||||
|         ":package-srcs", | ||||
|         "//vendor/golang.org/x/tools/go/analysis/passes/inspect:all-srcs", | ||||
|     ], | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:public"], | ||||
| ) | ||||
							
								
								
									
										193
									
								
								vendor/golang.org/x/tools/go/analysis/analysis.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										193
									
								
								vendor/golang.org/x/tools/go/analysis/analysis.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,193 @@ | ||||
| package analysis | ||||
|  | ||||
| import ( | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"go/ast" | ||||
| 	"go/token" | ||||
| 	"go/types" | ||||
| 	"reflect" | ||||
| ) | ||||
|  | ||||
// An Analyzer describes an analysis function and its options.
// Analyzers are typically declared as (logically constant) package-level
// variables and handed to a driver program.
type Analyzer struct {
	// The Name of the analyzer must be a valid Go identifier
	// as it may appear in command-line flags, URLs, and so on.
	Name string

	// Doc is the documentation for the analyzer.
	// The part before the first "\n\n" is the title
	// (no capital or period, max ~60 letters).
	Doc string

	// Flags defines any flags accepted by the analyzer.
	// The manner in which these flags are exposed to the user
	// depends on the driver which runs the analyzer.
	Flags flag.FlagSet

	// Run applies the analyzer to a package.
	// It returns an error if the analyzer failed.
	//
	// On success, the Run function may return a result
	// computed by the Analyzer; its type must match ResultType.
	// The driver makes this result available as an input to
	// another Analyzer that depends directly on this one (see
	// Requires) when it analyzes the same package.
	//
	// To pass analysis results between packages (and thus
	// potentially between address spaces), use Facts, which are
	// serializable.
	Run func(*Pass) (interface{}, error)

	// RunDespiteErrors allows the driver to invoke
	// the Run method of this analyzer even on a
	// package that contains parse or type errors.
	RunDespiteErrors bool

	// Requires is a set of analyzers that must run successfully
	// before this one on a given package. This analyzer may inspect
	// the outputs produced by each analyzer in Requires.
	// The graph over analyzers implied by Requires edges must be acyclic.
	//
	// Requires establishes a "horizontal" dependency between
	// analysis passes (different analyzers, same package).
	Requires []*Analyzer

	// ResultType is the type of the optional result of the Run function.
	ResultType reflect.Type

	// FactTypes indicates that this analyzer imports and exports
	// Facts of the specified concrete types.
	// An analyzer that uses facts may assume that its import
	// dependencies have been similarly analyzed before it runs.
	// Facts must be pointers.
	//
	// FactTypes establishes a "vertical" dependency between
	// analysis passes (same analyzer, different packages).
	FactTypes []Fact
}
|  | ||||
| func (a *Analyzer) String() string { return a.Name } | ||||
|  | ||||
// A Pass provides information to the Run function that
// applies a specific analyzer to a single Go package.
//
// It forms the interface between the analysis logic and the driver
// program, and has both input and an output components.
// The driver populates every field before invoking Run.
//
// As in a compiler, one pass may depend on the result computed by another.
//
// The Run function should not call any of the Pass functions concurrently.
type Pass struct {
	Analyzer *Analyzer // the identity of the current analyzer

	// syntax and type information
	Fset       *token.FileSet // file position information
	Files      []*ast.File    // the abstract syntax tree of each file
	OtherFiles []string       // names of non-Go files of this package
	Pkg        *types.Package // type information about the package
	TypesInfo  *types.Info    // type information about the syntax trees
	TypesSizes types.Sizes    // function for computing sizes of types

	// Report reports a Diagnostic, a finding about a specific location
	// in the analyzed source code such as a potential mistake.
	// It may be called by the Run function.
	Report func(Diagnostic)

	// ResultOf provides the inputs to this analysis pass, which are
	// the corresponding results of its prerequisite analyzers.
	// The map keys are the elements of Analyzer.Requires,
	// and the type of each corresponding value is the required
	// analysis's ResultType.
	ResultOf map[*Analyzer]interface{}

	// -- facts --

	// ImportObjectFact retrieves a fact associated with obj.
	// Given a value ptr of type *T, where *T satisfies Fact,
	// ImportObjectFact copies the value to *ptr.
	//
	// ImportObjectFact panics if called after the pass is complete.
	// ImportObjectFact is not concurrency-safe.
	ImportObjectFact func(obj types.Object, fact Fact) bool

	// ImportPackageFact retrieves a fact associated with package pkg,
	// which must be this package or one of its dependencies.
	// See comments for ImportObjectFact.
	ImportPackageFact func(pkg *types.Package, fact Fact) bool

	// ExportObjectFact associates a fact of type *T with the obj,
	// replacing any previous fact of that type.
	//
	// ExportObjectFact panics if it is called after the pass is
	// complete, or if obj does not belong to the package being analyzed.
	// ExportObjectFact is not concurrency-safe.
	ExportObjectFact func(obj types.Object, fact Fact)

	// ExportPackageFact associates a fact with the current package.
	// See comments for ExportObjectFact.
	ExportPackageFact func(fact Fact)

	/* Further fields may be added in future. */
	// For example, suggested or applied refactorings.
}
|  | ||||
| // Reportf is a helper function that reports a Diagnostic using the | ||||
| // specified position and formatted error message. | ||||
| func (pass *Pass) Reportf(pos token.Pos, format string, args ...interface{}) { | ||||
| 	msg := fmt.Sprintf(format, args...) | ||||
| 	pass.Report(Diagnostic{Pos: pos, Message: msg}) | ||||
| } | ||||
|  | ||||
| func (pass *Pass) String() string { | ||||
| 	return fmt.Sprintf("%s@%s", pass.Analyzer.Name, pass.Pkg.Path()) | ||||
| } | ||||
|  | ||||
// A Fact is an intermediate fact produced during analysis.
//
// Each fact is associated with a named declaration (a types.Object) or
// with a package as a whole. A single object or package may have
// multiple associated facts, but only one of any particular fact type.
//
// A Fact represents a predicate such as "never returns", but does not
// represent the subject of the predicate such as "function F" or "package P".
//
// Facts may be produced in one analysis pass and consumed by another
// analysis pass even if these are in different address spaces.
// If package P imports Q, all facts about Q produced during
// analysis of that package will be available during later analysis of P.
// Facts are analogous to type export data in a build system:
// just as export data enables separate compilation of several passes,
// facts enable "separate analysis".
//
// Each pass (a, p) starts with the set of facts produced by the
// same analyzer a applied to the packages directly imported by p.
// The analysis may add facts to the set, and they may be exported in turn.
// An analysis's Run function may retrieve facts by calling
// Pass.Import{Object,Package}Fact and update them using
// Pass.Export{Object,Package}Fact.
//
// A fact is logically private to its Analysis. To pass values
// between different analyzers, use the results mechanism;
// see Analyzer.Requires, Analyzer.ResultType, and Pass.ResultOf.
//
// A Fact type must be a pointer.
// Facts are encoded and decoded using encoding/gob.
// A Fact may implement the GobEncoder/GobDecoder interfaces
// to customize its encoding. Fact encoding should not fail.
//
// A Fact should not be modified once exported.
type Fact interface {
	// AFact is a dummy marker method: it exists only so that arbitrary
	// types do not accidentally satisfy the Fact interface.
	AFact() // dummy method to avoid type errors
}
|  | ||||
// A Diagnostic is a message associated with a source location.
//
// An Analyzer may return a variety of diagnostics; the optional Category,
// which should be a constant, may be used to classify them.
// It is primarily intended to make it easy to look up documentation.
type Diagnostic struct {
	Pos      token.Pos // location the message applies to
	Category string    // optional
	Message  string    // human-readable description of the finding
}
							
								
								
									
										336
									
								
								vendor/golang.org/x/tools/go/analysis/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										336
									
								
								vendor/golang.org/x/tools/go/analysis/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,336 @@ | ||||
| /* | ||||
|  | ||||
| The analysis package defines the interface between a modular static | ||||
| analysis and an analysis driver program. | ||||
|  | ||||
| Background | ||||
|  | ||||
| A static analysis is a function that inspects a package of Go code and | ||||
| reports a set of diagnostics (typically mistakes in the code), and | ||||
| perhaps produces other results as well, such as suggested refactorings | ||||
| or other facts. An analysis that reports mistakes is informally called a | ||||
| "checker". For example, the printf checker reports mistakes in | ||||
| fmt.Printf format strings. | ||||
|  | ||||
| A "modular" analysis is one that inspects one package at a time but can | ||||
| save information from a lower-level package and use it when inspecting a | ||||
| higher-level package, analogous to separate compilation in a toolchain. | ||||
| The printf checker is modular: when it discovers that a function such as | ||||
| log.Fatalf delegates to fmt.Printf, it records this fact, and checks | ||||
| calls to that function too, including calls made from another package. | ||||
|  | ||||
| By implementing a common interface, checkers from a variety of sources | ||||
| can be easily selected, incorporated, and reused in a wide range of | ||||
| driver programs including command-line tools (such as vet), text editors and | ||||
| IDEs, build and test systems (such as go build, Bazel, or Buck), test | ||||
| frameworks, code review tools, code-base indexers (such as SourceGraph), | ||||
| documentation viewers (such as godoc), batch pipelines for large code | ||||
| bases, and so on. | ||||
|  | ||||
|  | ||||
| Analyzer | ||||
|  | ||||
| The primary type in the API is Analyzer. An Analyzer statically | ||||
| describes an analysis function: its name, documentation, flags, | ||||
| relationship to other analyzers, and of course, its logic. | ||||
|  | ||||
| To define an analysis, a user declares a (logically constant) variable | ||||
| of type Analyzer. Here is a typical example from one of the analyzers in | ||||
| the go/analysis/passes/ subdirectory: | ||||
|  | ||||
| 	package unusedresult | ||||
|  | ||||
| 	var Analyzer = &analysis.Analyzer{ | ||||
| 		Name:	"unusedresult", | ||||
| 		Doc:	"check for unused results of calls to some functions", | ||||
| 		Run:    run, | ||||
| 		... | ||||
| 	} | ||||
|  | ||||
| 	func run(pass *analysis.Pass) (interface{}, error) { | ||||
| 		... | ||||
| 	} | ||||
|  | ||||
|  | ||||
| An analysis driver is a program such as vet that runs a set of | ||||
| analyses and prints the diagnostics that they report. | ||||
| The driver program must import the list of Analyzers it needs. | ||||
| Typically each Analyzer resides in a separate package. | ||||
| To add a new Analyzer to an existing driver, add another item to the list: | ||||
|  | ||||
| 	import ( "unusedresult"; "nilness"; "printf" ) | ||||
|  | ||||
| 	var analyses = []*analysis.Analyzer{ | ||||
| 		unusedresult.Analyzer, | ||||
| 		nilness.Analyzer, | ||||
| 		printf.Analyzer, | ||||
| 	} | ||||
|  | ||||
| A driver may use the name, flags, and documentation to provide on-line | ||||
| help that describes the analyses it performs. | ||||
| The doc comment contains a brief one-line summary, | ||||
| optionally followed by paragraphs of explanation. | ||||
| The vet command, shown below, is an example of a driver that runs | ||||
| multiple analyzers. It is based on the multichecker package | ||||
| (see the "Standalone commands" section for details). | ||||
|  | ||||
| 	$ go build golang.org/x/tools/go/analysis/cmd/vet | ||||
| 	$ ./vet help | ||||
| 	vet is a tool for static analysis of Go programs. | ||||
|  | ||||
| 	Usage: vet [-flag] [package] | ||||
|  | ||||
| 	Registered analyzers: | ||||
|  | ||||
| 	    asmdecl      report mismatches between assembly files and Go declarations | ||||
| 	    assign       check for useless assignments | ||||
| 	    atomic       check for common mistakes using the sync/atomic package | ||||
| 	    ... | ||||
| 	    unusedresult check for unused results of calls to some functions | ||||
|  | ||||
| 	$ ./vet help unusedresult | ||||
| 	unusedresult: check for unused results of calls to some functions | ||||
|  | ||||
| 	Analyzer flags: | ||||
|  | ||||
| 	  -unusedresult.funcs value | ||||
| 	        comma-separated list of functions whose results must be used (default Error,String) | ||||
| 	  -unusedresult.stringmethods value | ||||
| 	        comma-separated list of names of methods of type func() string whose results must be used | ||||
|  | ||||
| 	Some functions like fmt.Errorf return a result and have no side effects, | ||||
| 	so it is always a mistake to discard the result. This analyzer reports | ||||
| 	calls to certain functions in which the result of the call is ignored. | ||||
|  | ||||
| 	The set of functions may be controlled using flags. | ||||
|  | ||||
| The Analyzer type has more fields besides those shown above: | ||||
|  | ||||
| 	type Analyzer struct { | ||||
| 		Name			string | ||||
| 		Doc			string | ||||
| 		Flags			flag.FlagSet | ||||
| 		Run			func(*Pass) (interface{}, error) | ||||
| 		RunDespiteErrors	bool | ||||
| 		ResultType		reflect.Type | ||||
| 		Requires		[]*Analyzer | ||||
| 		FactTypes		[]Fact | ||||
| 	} | ||||
|  | ||||
| The Flags field declares a set of named (global) flag variables that | ||||
| control analysis behavior. Unlike vet, analysis flags are not declared | ||||
| directly in the command line FlagSet; it is up to the driver to set the | ||||
| flag variables. A driver for a single analysis, a, might expose its flag | ||||
| f directly on the command line as -f, whereas a driver for multiple | ||||
| analyses might prefix the flag name by the analysis name (-a.f) to avoid | ||||
| ambiguity. An IDE might expose the flags through a graphical interface, | ||||
| and a batch pipeline might configure them from a config file. | ||||
| See the "findcall" analyzer for an example of flags in action. | ||||
|  | ||||
| The RunDespiteErrors flag indicates whether the analysis is equipped to | ||||
| handle ill-typed code. If not, the driver will skip the analysis if | ||||
| there were parse or type errors. | ||||
| The optional ResultType field specifies the type of the result value | ||||
| computed by this analysis and made available to other analyses. | ||||
| The Requires field specifies a list of analyses upon which | ||||
| this one depends and whose results it may access, and it constrains the | ||||
| order in which a driver may run analyses. | ||||
| The FactTypes field is discussed in the section on Modularity. | ||||
| The analysis package provides a Validate function to perform basic | ||||
| sanity checks on an Analyzer, such as that its Requires graph is | ||||
| acyclic, its fact and result types are unique, and so on. | ||||
|  | ||||
| Finally, the Run field contains a function to be called by the driver to | ||||
| execute the analysis on a single package. The driver passes it an | ||||
| instance of the Pass type. | ||||
|  | ||||
|  | ||||
| Pass | ||||
|  | ||||
| A Pass describes a single unit of work: the application of a particular | ||||
| Analyzer to a particular package of Go code. | ||||
| The Pass provides information to the Analyzer's Run function about the | ||||
| package being analyzed, and provides operations to the Run function for | ||||
| reporting diagnostics and other information back to the driver. | ||||
|  | ||||
| 	type Pass struct { | ||||
| 		Fset   		*token.FileSet | ||||
| 		Files		[]*ast.File | ||||
| 		OtherFiles	[]string | ||||
| 		Pkg		*types.Package | ||||
| 		TypesInfo	*types.Info | ||||
| 		ResultOf	map[*Analyzer]interface{} | ||||
| 		Report		func(Diagnostic) | ||||
| 		... | ||||
| 	} | ||||
|  | ||||
| The Fset, Files, Pkg, and TypesInfo fields provide the syntax trees, | ||||
| type information, and source positions for a single package of Go code. | ||||
|  | ||||
| The OtherFiles field provides the names, but not the contents, of non-Go | ||||
| files such as assembly that are part of this package. See the "asmdecl" | ||||
| or "buildtag" analyzers for examples of loading non-Go files and reporting | ||||
| diagnostics against them. | ||||
|  | ||||
| The ResultOf field provides the results computed by the analyzers | ||||
| required by this one, as expressed in its Analyzer.Requires field. The | ||||
| driver runs the required analyzers first and makes their results | ||||
| available in this map. Each Analyzer must return a value of the type | ||||
| described in its Analyzer.ResultType field. | ||||
| For example, the "ctrlflow" analyzer returns a *ctrlflow.CFGs, which | ||||
| provides a control-flow graph for each function in the package (see | ||||
| golang.org/x/tools/go/cfg); the "inspect" analyzer returns a value that | ||||
| enables other Analyzers to traverse the syntax trees of the package more | ||||
| efficiently; and the "buildssa" analyzer constructs an SSA-form | ||||
| intermediate representation. | ||||
| Each of these Analyzers extends the capabilities of later Analyzers | ||||
| without adding a dependency to the core API, so an analysis tool pays | ||||
| only for the extensions it needs. | ||||
|  | ||||
| The Report function emits a diagnostic, a message associated with a | ||||
| source position. For most analyses, diagnostics are their primary | ||||
| result. | ||||
| For convenience, Pass provides a helper method, Reportf, to report a new | ||||
| diagnostic by formatting a string. | ||||
| Diagnostic is defined as: | ||||
|  | ||||
| 	type Diagnostic struct { | ||||
| 		Pos      token.Pos | ||||
| 		Category string // optional | ||||
| 		Message  string | ||||
| 	} | ||||
|  | ||||
| The optional Category field is a short identifier that classifies the | ||||
| kind of message when an analysis produces several kinds of diagnostic. | ||||
|  | ||||
| Most Analyzers inspect typed Go syntax trees, but a few, such as asmdecl | ||||
| and buildtag, inspect the raw text of Go source files or even non-Go | ||||
| files such as assembly. To report a diagnostic against a line of a | ||||
| raw text file, use the following sequence: | ||||
|  | ||||
| 	content, err := ioutil.ReadFile(filename) | ||||
| 	if err != nil { ... } | ||||
| 	tf := fset.AddFile(filename, -1, len(content)) | ||||
| 	tf.SetLinesForContent(content) | ||||
| 	... | ||||
| 	pass.Reportf(tf.LineStart(line), "oops") | ||||
|  | ||||
|  | ||||
| Modular analysis with Facts | ||||
|  | ||||
| To improve efficiency and scalability, large programs are routinely | ||||
| built using separate compilation: units of the program are compiled | ||||
| separately, and recompiled only when one of their dependencies changes; | ||||
| independent modules may be compiled in parallel. The same technique may | ||||
| be applied to static analyses, for the same benefits. Such analyses are | ||||
| described as "modular". | ||||
|  | ||||
| A compiler’s type checker is an example of a modular static analysis. | ||||
| Many other checkers we would like to apply to Go programs can be | ||||
| understood as alternative or non-standard type systems. For example, | ||||
| vet's printf checker infers whether a function has the "printf wrapper" | ||||
| type, and it applies stricter checks to calls of such functions. In | ||||
| addition, it records which functions are printf wrappers for use by | ||||
| later analysis units to identify other printf wrappers by induction. | ||||
| A result such as “f is a printf wrapper” that is not interesting by | ||||
| itself but serves as a stepping stone to an interesting result (such as | ||||
| a diagnostic) is called a "fact". | ||||
|  | ||||
| The analysis API allows an analysis to define new types of facts, to | ||||
| associate facts of these types with objects (named entities) declared | ||||
| within the current package, or with the package as a whole, and to query | ||||
| for an existing fact of a given type associated with an object or | ||||
| package. | ||||
|  | ||||
| An Analyzer that uses facts must declare their types: | ||||
|  | ||||
| 	var Analyzer = &analysis.Analyzer{ | ||||
| 		Name:       "printf", | ||||
| 		FactTypes: []analysis.Fact{new(isWrapper)}, | ||||
| 		... | ||||
| 	} | ||||
|  | ||||
| 	type isWrapper struct{} // => *types.Func f “is a printf wrapper” | ||||
|  | ||||
| A driver program ensures that facts for a pass’s dependencies are | ||||
| generated before analyzing the pass, and it is responsible for propagating | ||||
| facts from one pass to another, possibly across address spaces. | ||||
| Consequently, Facts must be serializable. The API requires that drivers | ||||
| use the gob encoding, an efficient, robust, self-describing binary | ||||
| protocol. A fact type may implement the GobEncoder/GobDecoder interfaces | ||||
| if the default encoding is unsuitable. Facts should be stateless. | ||||
|  | ||||
| The Pass type has functions to import and export facts, | ||||
| associated either with an object or with a package: | ||||
|  | ||||
| 	type Pass struct { | ||||
| 		... | ||||
| 		ExportObjectFact func(types.Object, Fact) | ||||
| 		ImportObjectFact func(types.Object, Fact) bool | ||||
|  | ||||
| 		ExportPackageFact func(fact Fact) | ||||
| 		ImportPackageFact func(*types.Package, Fact) bool | ||||
| 	} | ||||
|  | ||||
| An Analyzer may only export facts associated with the current package or | ||||
| its objects, though it may import facts from any package or object that | ||||
| is an import dependency of the current package. | ||||
|  | ||||
| Conceptually, ExportObjectFact(obj, fact) inserts fact into a hidden map keyed by | ||||
| the pair (obj, TypeOf(fact)), and the ImportObjectFact function | ||||
| retrieves the entry from this map and copies its value into the variable | ||||
| pointed to by fact. This scheme assumes that the concrete type of fact | ||||
| is a pointer; this assumption is checked by the Validate function. | ||||
| See the "printf" analyzer for an example of object facts in action. | ||||
|  | ||||
| Some driver implementations (such as those based on Bazel and Blaze) do | ||||
| not currently apply analyzers to packages of the standard library. | ||||
| Therefore, for best results, analyzer authors should not rely on | ||||
| analysis facts being available for standard packages. | ||||
| For example, although the printf checker is capable of deducing during | ||||
| analysis of the log package that log.Printf is a printf-wrapper, | ||||
| this fact is built in to the analyzer so that it correctly checks | ||||
| calls to log.Printf even when run in a driver that does not apply | ||||
| it to standard packages. We plan to remove this limitation in future. | ||||
|  | ||||
|  | ||||
| Testing an Analyzer | ||||
|  | ||||
| The analysistest subpackage provides utilities for testing an Analyzer. | ||||
| In a few lines of code, it is possible to run an analyzer on a package | ||||
| of testdata files and check that it reported all the expected | ||||
| diagnostics and facts (and no more). Expectations are expressed using | ||||
| "// want ..." comments in the input code. | ||||
|  | ||||
|  | ||||
| Standalone commands | ||||
|  | ||||
| Analyzers are provided in the form of packages that a driver program is | ||||
| expected to import. The vet command imports a set of several analyzers, | ||||
| but users may wish to define their own analysis commands that perform | ||||
| additional checks. To simplify the task of creating an analysis command, | ||||
| either for a single analyzer or for a whole suite, we provide the | ||||
| singlechecker and multichecker subpackages. | ||||
|  | ||||
| The singlechecker package provides the main function for a command that | ||||
| runs one analyzer. By convention, each analyzer such as | ||||
| go/passes/findcall should be accompanied by a singlechecker-based | ||||
| command such as go/analysis/passes/findcall/cmd/findcall, defined in its | ||||
| entirety as: | ||||
|  | ||||
| 	package main | ||||
|  | ||||
| 	import ( | ||||
| 		"golang.org/x/tools/go/analysis/passes/findcall" | ||||
| 		"golang.org/x/tools/go/analysis/singlechecker" | ||||
| 	) | ||||
|  | ||||
| 	func main() { singlechecker.Main(findcall.Analyzer) } | ||||
|  | ||||
| A tool that provides multiple analyzers can use multichecker in a | ||||
| similar way, giving it the list of Analyzers. | ||||
|  | ||||
|  | ||||
|  | ||||
| */ | ||||
| package analysis | ||||
							
								
								
									
										27
									
								
								vendor/golang.org/x/tools/go/analysis/passes/inspect/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/golang.org/x/tools/go/analysis/passes/inspect/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
# Bazel rules for the vendored golang.org/x/tools/go/analysis/passes/inspect
# package.
# NOTE(review): tags = ["automanaged"] suggests the filegroups below are
# regenerated by repo tooling (update-bazel); confirm before hand-editing.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["inspect.go"],
    importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/analysis/passes/inspect",
    importpath = "golang.org/x/tools/go/analysis/passes/inspect",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/golang.org/x/tools/go/analysis:go_default_library",
        "//vendor/golang.org/x/tools/go/ast/inspector:go_default_library",
    ],
)

# Standard source-tree plumbing: package-srcs collects this directory's
# files; all-srcs is aggregated by parent packages.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										49
									
								
								vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										49
									
								
								vendor/golang.org/x/tools/go/analysis/passes/inspect/inspect.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,49 @@ | ||||
| // Copyright 2018 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package inspect defines an Analyzer that provides an AST inspector | ||||
| // (golang.org/x/tools/go/ast/inspect.Inspect) for the syntax trees of a | ||||
| // package. It is only a building block for other analyzers. | ||||
| // | ||||
| // Example of use in another analysis: | ||||
| // | ||||
| //	import ( | ||||
| //		"golang.org/x/tools/go/analysis" | ||||
| //		"golang.org/x/tools/go/analysis/passes/inspect" | ||||
| //		"golang.org/x/tools/go/ast/inspector" | ||||
| //	) | ||||
| // | ||||
| //	var Analyzer = &analysis.Analyzer{ | ||||
| //		... | ||||
| //		Requires:       []*analysis.Analyzer{inspect.Analyzer}, | ||||
| //	} | ||||
| // | ||||
| // 	func run(pass *analysis.Pass) (interface{}, error) { | ||||
| // 		inspect := pass.ResultOf[inspect.Analyzer].(*inspector.Inspector) | ||||
| // 		inspect.Preorder(nil, func(n ast.Node) { | ||||
| // 			... | ||||
| // 		}) | ||||
| // 		return nil | ||||
| // 	} | ||||
| // | ||||
| package inspect | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| 	"golang.org/x/tools/go/ast/inspector" | ||||
| ) | ||||
|  | ||||
// Analyzer provides a shared inspector.Inspector for the syntax trees of
// a package. Other analyzers list it in their Requires field and retrieve
// the inspector via pass.ResultOf[inspect.Analyzer].
var Analyzer = &analysis.Analyzer{
	Name:             "inspect",
	Doc:              "optimize AST traversal for later passes",
	Run:              run,
	RunDespiteErrors: true, // traversal needs only syntax, so ill-typed packages are fine
	ResultType:       reflect.TypeOf(new(inspector.Inspector)),
}

// run builds the Inspector once per package; the driver caches the result
// for every analyzer that requires this one.
func run(pass *analysis.Pass) (interface{}, error) {
	return inspector.New(pass.Files), nil
}
							
								
								
									
										104
									
								
								vendor/golang.org/x/tools/go/analysis/validate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										104
									
								
								vendor/golang.org/x/tools/go/analysis/validate.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,104 @@ | ||||
| package analysis | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"unicode" | ||||
| ) | ||||
|  | ||||
| // Validate reports an error if any of the analyzers are misconfigured. | ||||
| // Checks include: | ||||
| // that the name is a valid identifier; | ||||
| // that analyzer names are unique; | ||||
| // that the Requires graph is acylic; | ||||
| // that analyzer fact types are unique; | ||||
| // that each fact type is a pointer. | ||||
| func Validate(analyzers []*Analyzer) error { | ||||
| 	names := make(map[string]bool) | ||||
|  | ||||
| 	// Map each fact type to its sole generating analyzer. | ||||
| 	factTypes := make(map[reflect.Type]*Analyzer) | ||||
|  | ||||
| 	// Traverse the Requires graph, depth first. | ||||
| 	const ( | ||||
| 		white = iota | ||||
| 		grey | ||||
| 		black | ||||
| 		finished | ||||
| 	) | ||||
| 	color := make(map[*Analyzer]uint8) | ||||
| 	var visit func(a *Analyzer) error | ||||
| 	visit = func(a *Analyzer) error { | ||||
| 		if a == nil { | ||||
| 			return fmt.Errorf("nil *Analyzer") | ||||
| 		} | ||||
| 		if color[a] == white { | ||||
| 			color[a] = grey | ||||
|  | ||||
| 			// names | ||||
| 			if !validIdent(a.Name) { | ||||
| 				return fmt.Errorf("invalid analyzer name %q", a) | ||||
| 			} | ||||
| 			if names[a.Name] { | ||||
| 				return fmt.Errorf("duplicate analyzer name %q", a) | ||||
| 			} | ||||
| 			names[a.Name] = true | ||||
|  | ||||
| 			if a.Doc == "" { | ||||
| 				return fmt.Errorf("analyzer %q is undocumented", a) | ||||
| 			} | ||||
|  | ||||
| 			// fact types | ||||
| 			for _, f := range a.FactTypes { | ||||
| 				if f == nil { | ||||
| 					return fmt.Errorf("analyzer %s has nil FactType", a) | ||||
| 				} | ||||
| 				t := reflect.TypeOf(f) | ||||
| 				if prev := factTypes[t]; prev != nil { | ||||
| 					return fmt.Errorf("fact type %s registered by two analyzers: %v, %v", | ||||
| 						t, a, prev) | ||||
| 				} | ||||
| 				if t.Kind() != reflect.Ptr { | ||||
| 					return fmt.Errorf("%s: fact type %s is not a pointer", a, t) | ||||
| 				} | ||||
| 				factTypes[t] = a | ||||
| 			} | ||||
|  | ||||
| 			// recursion | ||||
| 			for i, req := range a.Requires { | ||||
| 				if err := visit(req); err != nil { | ||||
| 					return fmt.Errorf("%s.Requires[%d]: %v", a.Name, i, err) | ||||
| 				} | ||||
| 			} | ||||
| 			color[a] = black | ||||
| 		} | ||||
|  | ||||
| 		return nil | ||||
| 	} | ||||
| 	for _, a := range analyzers { | ||||
| 		if err := visit(a); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Reject duplicates among analyzers. | ||||
| 	// Precondition:  color[a] == black. | ||||
| 	// Postcondition: color[a] == finished. | ||||
| 	for _, a := range analyzers { | ||||
| 		if color[a] == finished { | ||||
| 			return fmt.Errorf("duplicate analyzer: %s", a.Name) | ||||
| 		} | ||||
| 		color[a] = finished | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// validIdent reports whether name is a legal Go identifier: a non-empty
// sequence of letters, digits, and underscores that does not start with
// a digit.
func validIdent(name string) bool {
	if name == "" {
		return false
	}
	for i, r := range name {
		switch {
		case r == '_' || unicode.IsLetter(r):
			// always permitted
		case unicode.IsDigit(r):
			if i == 0 {
				return false // identifiers may not begin with a digit
			}
		default:
			return false
		}
	}
	return true
}
							
								
								
									
										26
									
								
								vendor/golang.org/x/tools/go/ast/inspector/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										26
									
								
								vendor/golang.org/x/tools/go/ast/inspector/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,26 @@ | ||||
# Bazel rules for the vendored golang.org/x/tools/go/ast/inspector package.
# NOTE(review): tags = ["automanaged"] suggests the filegroups below are
# regenerated by repo tooling (update-bazel); confirm before hand-editing.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "inspector.go",
        "typeof.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/ast/inspector",
    importpath = "golang.org/x/tools/go/ast/inspector",
    visibility = ["//visibility:public"],
)

# Standard source-tree plumbing: package-srcs collects this directory's
# files; all-srcs is aggregated by parent packages.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										182
									
								
								vendor/golang.org/x/tools/go/ast/inspector/inspector.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										182
									
								
								vendor/golang.org/x/tools/go/ast/inspector/inspector.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,182 @@ | ||||
| // Copyright 2018 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package inspector provides helper functions for traversal over the | ||||
| // syntax trees of a package, including node filtering by type, and | ||||
| // materialization of the traversal stack. | ||||
| // | ||||
| // During construction, the inspector does a complete traversal and | ||||
| // builds a list of push/pop events and their node type. Subsequent | ||||
| // method calls that request a traversal scan this list, rather than walk | ||||
| // the AST, and perform type filtering using efficient bit sets. | ||||
| // | ||||
| // Experiments suggest the inspector's traversals are about 2.5x faster | ||||
| // than ast.Inspect, but it may take around 5 traversals for this | ||||
| // benefit to amortize the inspector's construction cost. | ||||
| // If efficiency is the primary concern, do not use Inspector for | ||||
| // one-off traversals. | ||||
| package inspector | ||||
|  | ||||
| // There are four orthogonal features in a traversal: | ||||
| //  1 type filtering | ||||
| //  2 pruning | ||||
| //  3 postorder calls to f | ||||
| //  4 stack | ||||
| // Rather than offer all of them in the API, | ||||
| // only a few combinations are exposed: | ||||
| // - Preorder is the fastest and has fewest features, | ||||
| //   but is the most commonly needed traversal. | ||||
| // - Nodes and WithStack both provide pruning and postorder calls, | ||||
| //   even though few clients need it, because supporting two versions | ||||
| //   is not justified. | ||||
| // More combinations could be supported by expressing them as | ||||
| // wrappers around a more generic traversal, but this was measured | ||||
| // and found to degrade performance significantly (30%). | ||||
|  | ||||
| import ( | ||||
| 	"go/ast" | ||||
| ) | ||||
|  | ||||
// An Inspector provides methods for inspecting
// (traversing) the syntax trees of a package.
type Inspector struct {
	events []event // flattened push/pop trace of all files, in traversal order
}

// New returns an Inspector for the specified syntax trees.
func New(files []*ast.File) *Inspector {
	return &Inspector{traverse(files)}
}

// An event represents a push or a pop
// of an ast.Node during a traversal.
type event struct {
	node  ast.Node
	typ   uint64 // typeOf(node)
	index int    // 1 + index of corresponding pop event, or 0 if this is a pop
}
|  | ||||
| // Preorder visits all the nodes of the files supplied to New in | ||||
| // depth-first order. It calls f(n) for each node n before it visits | ||||
| // n's children. | ||||
| // | ||||
| // The types argument, if non-empty, enables type-based filtering of | ||||
| // events. The function f if is called only for nodes whose type | ||||
| // matches an element of the types slice. | ||||
| func (in *Inspector) Preorder(types []ast.Node, f func(ast.Node)) { | ||||
| 	// Because it avoids postorder calls to f, and the pruning | ||||
| 	// check, Preorder is almost twice as fast as Nodes. The two | ||||
| 	// features seem to contribute similar slowdowns (~1.4x each). | ||||
|  | ||||
| 	mask := maskOf(types) | ||||
| 	for i := 0; i < len(in.events); { | ||||
| 		ev := in.events[i] | ||||
| 		if ev.typ&mask != 0 { | ||||
| 			if ev.index > 0 { | ||||
| 				f(ev.node) | ||||
| 			} | ||||
| 		} | ||||
| 		i++ | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Nodes visits the nodes of the files supplied to New in depth-first
// order. It calls f(n, true) for each node n before it visits n's
// children. If f returns true, Nodes invokes f recursively for each
// of the non-nil children of the node, followed by a call of
// f(n, false).
//
// The types argument, if non-empty, enables type-based filtering of
// events. The function f is called only for nodes whose type
// matches an element of the types slice.
func (in *Inspector) Nodes(types []ast.Node, f func(n ast.Node, push bool) (prune bool)) {
	mask := maskOf(types)
	for i := 0; i < len(in.events); {
		// ev.index > 0 marks a push and holds 1 + the index of the
		// matching pop; ev.index == 0 marks a pop.
		ev := in.events[i]
		if ev.typ&mask != 0 {
			if ev.index > 0 {
				// push
				if !f(ev.node, true) {
					i = ev.index // jump to corresponding pop + 1
					continue
				}
			} else {
				// pop
				f(ev.node, false)
			}
		}
		i++
	}
}
|  | ||||
// WithStack visits nodes in a similar manner to Nodes, but it
// supplies each call to f an additional argument, the current
// traversal stack. The stack's first element is the outermost node,
// an *ast.File; its last is the innermost, n.
func (in *Inspector) WithStack(types []ast.Node, f func(n ast.Node, push bool, stack []ast.Node) (prune bool)) {
	mask := maskOf(types)
	var stack []ast.Node
	for i := 0; i < len(in.events); {
		ev := in.events[i]
		if ev.index > 0 {
			// push: the node is stacked even when filtered out by mask,
			// so that stack always reflects the full ancestry of n.
			stack = append(stack, ev.node)
			if ev.typ&mask != 0 {
				if !f(ev.node, true, stack) {
					// prune: jump past the matching pop event and undo
					// the push manually, since the pop will be skipped.
					i = ev.index
					stack = stack[:len(stack)-1]
					continue
				}
			}
		} else {
			// pop
			if ev.typ&mask != 0 {
				f(ev.node, false, stack)
			}
			stack = stack[:len(stack)-1]
		}
		i++
	}
}
|  | ||||
// traverse builds the table of events representing a traversal.
func traverse(files []*ast.File) []event {
	// Preallocate approximate number of events
	// based on source file extent.
	// This makes traverse faster by 4x (!).
	var extent int
	for _, f := range files {
		extent += int(f.End() - f.Pos())
	}
	// This estimate is based on the net/http package.
	events := make([]event, 0, extent*33/100)

	var stack []event
	for _, f := range files {
		ast.Inspect(f, func(n ast.Node) bool {
			if n != nil {
				// push
				ev := event{
					node:  n,
					typ:   typeOf(n),
					index: len(events), // push event temporarily holds own index
				}
				stack = append(stack, ev)
				events = append(events, ev)
			} else {
				// pop: the stacked copy still carries the push's own
				// position in its index field; use it to patch the push
				// so it points just past this pop.
				ev := stack[len(stack)-1]
				stack = stack[:len(stack)-1]

				events[ev.index].index = len(events) + 1 // make push refer to pop

				ev.index = 0 // turn ev into a pop event
				events = append(events, ev)
			}
			return true
		})
	}

	return events
}
							
								
								
									
										216
									
								
								vendor/golang.org/x/tools/go/ast/inspector/typeof.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										216
									
								
								vendor/golang.org/x/tools/go/ast/inspector/typeof.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,216 @@ | ||||
| package inspector | ||||
|  | ||||
| // This file defines func typeOf(ast.Node) uint64. | ||||
| // | ||||
| // The initial map-based implementation was too slow; | ||||
| // see https://go-review.googlesource.com/c/tools/+/135655/1/go/ast/inspector/inspector.go#196 | ||||
|  | ||||
| import "go/ast" | ||||
|  | ||||
// Bit positions for each concrete ast.Node type, used by typeOf and
// maskOf. With 55 node types, a set of types fits in a single uint64.
const (
	nArrayType = iota
	nAssignStmt
	nBadDecl
	nBadExpr
	nBadStmt
	nBasicLit
	nBinaryExpr
	nBlockStmt
	nBranchStmt
	nCallExpr
	nCaseClause
	nChanType
	nCommClause
	nComment
	nCommentGroup
	nCompositeLit
	nDeclStmt
	nDeferStmt
	nEllipsis
	nEmptyStmt
	nExprStmt
	nField
	nFieldList
	nFile
	nForStmt
	nFuncDecl
	nFuncLit
	nFuncType
	nGenDecl
	nGoStmt
	nIdent
	nIfStmt
	nImportSpec
	nIncDecStmt
	nIndexExpr
	nInterfaceType
	nKeyValueExpr
	nLabeledStmt
	nMapType
	nPackage
	nParenExpr
	nRangeStmt
	nReturnStmt
	nSelectStmt
	nSelectorExpr
	nSendStmt
	nSliceExpr
	nStarExpr
	nStructType
	nSwitchStmt
	nTypeAssertExpr
	nTypeSpec
	nTypeSwitchStmt
	nUnaryExpr
	nValueSpec
)
|  | ||||
| // typeOf returns a distinct single-bit value that represents the type of n. | ||||
| // | ||||
| // Various implementations were benchmarked with BenchmarkNewInspector: | ||||
| //								GOGC=off | ||||
| // - type switch				4.9-5.5ms	2.1ms | ||||
| // - binary search over a sorted list of types  5.5-5.9ms	2.5ms | ||||
| // - linear scan, frequency-ordered list 	5.9-6.1ms	2.7ms | ||||
| // - linear scan, unordered list		6.4ms		2.7ms | ||||
| // - hash table					6.5ms		3.1ms | ||||
| // A perfect hash seemed like overkill. | ||||
| // | ||||
| // The compiler's switch statement is the clear winner | ||||
| // as it produces a binary tree in code, | ||||
| // with constant conditions and good branch prediction. | ||||
| // (Sadly it is the most verbose in source code.) | ||||
| // Binary search suffered from poor branch prediction. | ||||
| // | ||||
func typeOf(n ast.Node) uint64 {
	// Fast path: nearly half of all nodes are identifiers.
	if _, ok := n.(*ast.Ident); ok {
		return 1 << nIdent
	}

	// These cases include all nodes encountered by ast.Inspect.
	// (The *ast.Ident case below is unreachable given the fast path
	// above, but is kept so the switch remains exhaustive.)
	switch n.(type) {
	case *ast.ArrayType:
		return 1 << nArrayType
	case *ast.AssignStmt:
		return 1 << nAssignStmt
	case *ast.BadDecl:
		return 1 << nBadDecl
	case *ast.BadExpr:
		return 1 << nBadExpr
	case *ast.BadStmt:
		return 1 << nBadStmt
	case *ast.BasicLit:
		return 1 << nBasicLit
	case *ast.BinaryExpr:
		return 1 << nBinaryExpr
	case *ast.BlockStmt:
		return 1 << nBlockStmt
	case *ast.BranchStmt:
		return 1 << nBranchStmt
	case *ast.CallExpr:
		return 1 << nCallExpr
	case *ast.CaseClause:
		return 1 << nCaseClause
	case *ast.ChanType:
		return 1 << nChanType
	case *ast.CommClause:
		return 1 << nCommClause
	case *ast.Comment:
		return 1 << nComment
	case *ast.CommentGroup:
		return 1 << nCommentGroup
	case *ast.CompositeLit:
		return 1 << nCompositeLit
	case *ast.DeclStmt:
		return 1 << nDeclStmt
	case *ast.DeferStmt:
		return 1 << nDeferStmt
	case *ast.Ellipsis:
		return 1 << nEllipsis
	case *ast.EmptyStmt:
		return 1 << nEmptyStmt
	case *ast.ExprStmt:
		return 1 << nExprStmt
	case *ast.Field:
		return 1 << nField
	case *ast.FieldList:
		return 1 << nFieldList
	case *ast.File:
		return 1 << nFile
	case *ast.ForStmt:
		return 1 << nForStmt
	case *ast.FuncDecl:
		return 1 << nFuncDecl
	case *ast.FuncLit:
		return 1 << nFuncLit
	case *ast.FuncType:
		return 1 << nFuncType
	case *ast.GenDecl:
		return 1 << nGenDecl
	case *ast.GoStmt:
		return 1 << nGoStmt
	case *ast.Ident:
		return 1 << nIdent
	case *ast.IfStmt:
		return 1 << nIfStmt
	case *ast.ImportSpec:
		return 1 << nImportSpec
	case *ast.IncDecStmt:
		return 1 << nIncDecStmt
	case *ast.IndexExpr:
		return 1 << nIndexExpr
	case *ast.InterfaceType:
		return 1 << nInterfaceType
	case *ast.KeyValueExpr:
		return 1 << nKeyValueExpr
	case *ast.LabeledStmt:
		return 1 << nLabeledStmt
	case *ast.MapType:
		return 1 << nMapType
	case *ast.Package:
		return 1 << nPackage
	case *ast.ParenExpr:
		return 1 << nParenExpr
	case *ast.RangeStmt:
		return 1 << nRangeStmt
	case *ast.ReturnStmt:
		return 1 << nReturnStmt
	case *ast.SelectStmt:
		return 1 << nSelectStmt
	case *ast.SelectorExpr:
		return 1 << nSelectorExpr
	case *ast.SendStmt:
		return 1 << nSendStmt
	case *ast.SliceExpr:
		return 1 << nSliceExpr
	case *ast.StarExpr:
		return 1 << nStarExpr
	case *ast.StructType:
		return 1 << nStructType
	case *ast.SwitchStmt:
		return 1 << nSwitchStmt
	case *ast.TypeAssertExpr:
		return 1 << nTypeAssertExpr
	case *ast.TypeSpec:
		return 1 << nTypeSpec
	case *ast.TypeSwitchStmt:
		return 1 << nTypeSwitchStmt
	case *ast.UnaryExpr:
		return 1 << nUnaryExpr
	case *ast.ValueSpec:
		return 1 << nValueSpec
	}
	// Unknown node type: contributes no bit, so it never matches a mask.
	return 0
}
|  | ||||
| func maskOf(nodes []ast.Node) uint64 { | ||||
| 	if nodes == nil { | ||||
| 		return 1<<64 - 1 // match all node types | ||||
| 	} | ||||
| 	var mask uint64 | ||||
| 	for _, n := range nodes { | ||||
| 		mask |= typeOf(n) | ||||
| 	} | ||||
| 	return mask | ||||
| } | ||||
							
								
								
									
										29
									
								
								vendor/golang.org/x/tools/go/buildutil/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								vendor/golang.org/x/tools/go/buildutil/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| load("@io_bazel_rules_go//go:def.bzl", "go_library") | ||||
|  | ||||
| go_library( | ||||
|     name = "go_default_library", | ||||
|     srcs = [ | ||||
|         "allpackages.go", | ||||
|         "fakecontext.go", | ||||
|         "overlay.go", | ||||
|         "tags.go", | ||||
|         "util.go", | ||||
|     ], | ||||
|     importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/buildutil", | ||||
|     importpath = "golang.org/x/tools/go/buildutil", | ||||
|     visibility = ["//visibility:public"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "package-srcs", | ||||
|     srcs = glob(["**"]), | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:private"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "all-srcs", | ||||
|     srcs = [":package-srcs"], | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:public"], | ||||
| ) | ||||
							
								
								
									
										198
									
								
								vendor/golang.org/x/tools/go/buildutil/allpackages.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										198
									
								
								vendor/golang.org/x/tools/go/buildutil/allpackages.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,198 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package buildutil provides utilities related to the go/build | ||||
| // package in the standard library. | ||||
| // | ||||
| // All I/O is done via the build.Context file system interface, which must | ||||
| // be concurrency-safe. | ||||
| package buildutil // import "golang.org/x/tools/go/buildutil" | ||||
|  | ||||
| import ( | ||||
| 	"go/build" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // AllPackages returns the package path of each Go package in any source | ||||
| // directory of the specified build context (e.g. $GOROOT or an element | ||||
| // of $GOPATH).  Errors are ignored.  The results are sorted. | ||||
| // All package paths are canonical, and thus may contain "/vendor/". | ||||
| // | ||||
| // The result may include import paths for directories that contain no | ||||
| // *.go files, such as "archive" (in $GOROOT/src). | ||||
| // | ||||
| // All I/O is done via the build.Context file system interface, | ||||
| // which must be concurrency-safe. | ||||
| // | ||||
| func AllPackages(ctxt *build.Context) []string { | ||||
| 	var list []string | ||||
| 	ForEachPackage(ctxt, func(pkg string, _ error) { | ||||
| 		list = append(list, pkg) | ||||
| 	}) | ||||
| 	sort.Strings(list) | ||||
| 	return list | ||||
| } | ||||
|  | ||||
| // ForEachPackage calls the found function with the package path of | ||||
| // each Go package it finds in any source directory of the specified | ||||
| // build context (e.g. $GOROOT or an element of $GOPATH). | ||||
| // All package paths are canonical, and thus may contain "/vendor/". | ||||
| // | ||||
| // If the package directory exists but could not be read, the second | ||||
| // argument to the found function provides the error. | ||||
| // | ||||
| // All I/O is done via the build.Context file system interface, | ||||
| // which must be concurrency-safe. | ||||
| // | ||||
| func ForEachPackage(ctxt *build.Context, found func(importPath string, err error)) { | ||||
| 	ch := make(chan item) | ||||
|  | ||||
| 	var wg sync.WaitGroup | ||||
| 	for _, root := range ctxt.SrcDirs() { | ||||
| 		root := root | ||||
| 		wg.Add(1) | ||||
| 		go func() { | ||||
| 			allPackages(ctxt, root, ch) | ||||
| 			wg.Done() | ||||
| 		}() | ||||
| 	} | ||||
| 	go func() { | ||||
| 		wg.Wait() | ||||
| 		close(ch) | ||||
| 	}() | ||||
|  | ||||
| 	// All calls to found occur in the caller's goroutine. | ||||
| 	for i := range ch { | ||||
| 		found(i.importPath, i.err) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// item is one result of the package scan: a package import path and,
// optionally, the error encountered while reading its directory.
type item struct {
	importPath string
	err        error // (optional)
}
|  | ||||
// We use a process-wide counting semaphore to limit
// the number of parallel calls to ReadDir.
// A send acquires a slot; a receive releases it.
var ioLimit = make(chan bool, 20)
|  | ||||
// allPackages walks the tree rooted at root (one source directory),
// sending an item on ch for each package directory found. It spawns a
// goroutine per subdirectory and returns only after the whole subtree
// has been visited.
func allPackages(ctxt *build.Context, root string, ch chan<- item) {
	root = filepath.Clean(root) + string(os.PathSeparator)

	var wg sync.WaitGroup

	var walkDir func(dir string)
	walkDir = func(dir string) {
		// Avoid .foo, _foo, and testdata directory trees.
		base := filepath.Base(dir)
		if base == "" || base[0] == '.' || base[0] == '_' || base == "testdata" {
			return
		}

		pkg := filepath.ToSlash(strings.TrimPrefix(dir, root))

		// Prune search if we encounter any of these import paths.
		switch pkg {
		case "builtin":
			return
		}

		ioLimit <- true // acquire semaphore: bounds concurrent ReadDir calls
		files, err := ReadDir(ctxt, dir)
		<-ioLimit // release semaphore
		if pkg != "" || err != nil {
			ch <- item{pkg, err}
		}
		for _, fi := range files {
			fi := fi // pin loop variable for the goroutine below
			if fi.IsDir() {
				wg.Add(1)
				go func() {
					walkDir(filepath.Join(dir, fi.Name()))
					wg.Done()
				}()
			}
		}
	}

	walkDir(root)
	wg.Wait()
}
|  | ||||
| // ExpandPatterns returns the set of packages matched by patterns, | ||||
| // which may have the following forms: | ||||
| // | ||||
| //		golang.org/x/tools/cmd/guru     # a single package | ||||
| //		golang.org/x/tools/...          # all packages beneath dir | ||||
| //		...                             # the entire workspace. | ||||
| // | ||||
| // Order is significant: a pattern preceded by '-' removes matching | ||||
| // packages from the set.  For example, these patterns match all encoding | ||||
| // packages except encoding/xml: | ||||
| // | ||||
| // 	encoding/... -encoding/xml | ||||
| // | ||||
| // A trailing slash in a pattern is ignored.  (Path components of Go | ||||
| // package names are separated by slash, not the platform's path separator.) | ||||
| // | ||||
| func ExpandPatterns(ctxt *build.Context, patterns []string) map[string]bool { | ||||
| 	// TODO(adonovan): support other features of 'go list': | ||||
| 	// - "std"/"cmd"/"all" meta-packages | ||||
| 	// - "..." not at the end of a pattern | ||||
| 	// - relative patterns using "./" or "../" prefix | ||||
|  | ||||
| 	pkgs := make(map[string]bool) | ||||
| 	doPkg := func(pkg string, neg bool) { | ||||
| 		if neg { | ||||
| 			delete(pkgs, pkg) | ||||
| 		} else { | ||||
| 			pkgs[pkg] = true | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// Scan entire workspace if wildcards are present. | ||||
| 	// TODO(adonovan): opt: scan only the necessary subtrees of the workspace. | ||||
| 	var all []string | ||||
| 	for _, arg := range patterns { | ||||
| 		if strings.HasSuffix(arg, "...") { | ||||
| 			all = AllPackages(ctxt) | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for _, arg := range patterns { | ||||
| 		if arg == "" { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		neg := arg[0] == '-' | ||||
| 		if neg { | ||||
| 			arg = arg[1:] | ||||
| 		} | ||||
|  | ||||
| 		if arg == "..." { | ||||
| 			// ... matches all packages | ||||
| 			for _, pkg := range all { | ||||
| 				doPkg(pkg, neg) | ||||
| 			} | ||||
| 		} else if dir := strings.TrimSuffix(arg, "/..."); dir != arg { | ||||
| 			// dir/... matches all packages beneath dir | ||||
| 			for _, pkg := range all { | ||||
| 				if strings.HasPrefix(pkg, dir) && | ||||
| 					(len(pkg) == len(dir) || pkg[len(dir)] == '/') { | ||||
| 					doPkg(pkg, neg) | ||||
| 				} | ||||
| 			} | ||||
| 		} else { | ||||
| 			// single package | ||||
| 			doPkg(strings.TrimSuffix(arg, "/"), neg) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return pkgs | ||||
| } | ||||
							
								
								
									
										109
									
								
								vendor/golang.org/x/tools/go/buildutil/fakecontext.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										109
									
								
								vendor/golang.org/x/tools/go/buildutil/fakecontext.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,109 @@ | ||||
| package buildutil | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"go/build" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"path" | ||||
| 	"path/filepath" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| // FakeContext returns a build.Context for the fake file tree specified | ||||
| // by pkgs, which maps package import paths to a mapping from file base | ||||
| // names to contents. | ||||
| // | ||||
| // The fake Context has a GOROOT of "/go" and no GOPATH, and overrides | ||||
| // the necessary file access methods to read from memory instead of the | ||||
| // real file system. | ||||
| // | ||||
| // Unlike a real file tree, the fake one has only two levels---packages | ||||
| // and files---so ReadDir("/go/src/") returns all packages under | ||||
| // /go/src/ including, for instance, "math" and "math/big". | ||||
| // ReadDir("/go/src/math/big") would return all the files in the | ||||
| // "math/big" package. | ||||
| // | ||||
func FakeContext(pkgs map[string]map[string]string) *build.Context {
	// clean maps a (possibly OS-style) file name to an import path
	// relative to the fake GOROOT's src directory.
	clean := func(filename string) string {
		f := path.Clean(filepath.ToSlash(filename))
		// Removing "/go/src" while respecting segment
		// boundaries has this unfortunate corner case:
		if f == "/go/src" {
			return ""
		}
		return strings.TrimPrefix(f, "/go/src/")
	}

	ctxt := build.Default // copy
	ctxt.GOROOT = "/go"
	ctxt.GOPATH = ""
	ctxt.Compiler = "gc"
	ctxt.IsDir = func(dir string) bool {
		dir = clean(dir)
		if dir == "" {
			return true // needed by (*build.Context).SrcDirs
		}
		return pkgs[dir] != nil
	}
	ctxt.ReadDir = func(dir string) ([]os.FileInfo, error) {
		dir = clean(dir)
		var fis []os.FileInfo
		if dir == "" {
			// enumerate packages
			for importPath := range pkgs {
				fis = append(fis, fakeDirInfo(importPath))
			}
		} else {
			// enumerate files of package
			for basename := range pkgs[dir] {
				fis = append(fis, fakeFileInfo(basename))
			}
		}
		// Map iteration order is random; sort for deterministic output.
		sort.Sort(byName(fis))
		return fis, nil
	}
	ctxt.OpenFile = func(filename string) (io.ReadCloser, error) {
		filename = clean(filename)
		dir, base := path.Split(filename)
		content, ok := pkgs[path.Clean(dir)][base]
		if !ok {
			return nil, fmt.Errorf("file not found: %s", filename)
		}
		return ioutil.NopCloser(strings.NewReader(content)), nil
	}
	ctxt.IsAbsPath = func(path string) bool {
		path = filepath.ToSlash(path)
		// Don't rely on the default (filepath.Path) since on
		// Windows, it reports virtual paths as non-absolute.
		return strings.HasPrefix(path, "/")
	}
	return &ctxt
}
|  | ||||
// byName sorts a []os.FileInfo lexically by file name,
// implementing sort.Interface.
type byName []os.FileInfo

func (s byName) Len() int           { return len(s) }
func (s byName) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
func (s byName) Less(i, j int) bool { return s[i].Name() < s[j].Name() }
|  | ||||
// fakeFileInfo is an os.FileInfo for an in-memory regular file;
// the string value is the file's base name.
type fakeFileInfo string

func (fi fakeFileInfo) Name() string    { return string(fi) }
func (fakeFileInfo) Sys() interface{}   { return nil }
func (fakeFileInfo) ModTime() time.Time { return time.Time{} }
func (fakeFileInfo) IsDir() bool        { return false }
func (fakeFileInfo) Size() int64        { return 0 }
func (fakeFileInfo) Mode() os.FileMode  { return 0644 }
|  | ||||
// fakeDirInfo is an os.FileInfo for an in-memory directory;
// the string value is the directory's base name.
type fakeDirInfo string

func (fd fakeDirInfo) Name() string    { return string(fd) }
func (fakeDirInfo) Sys() interface{}   { return nil }
func (fakeDirInfo) ModTime() time.Time { return time.Time{} }
func (fakeDirInfo) IsDir() bool        { return true }
func (fakeDirInfo) Size() int64        { return 0 }
func (fakeDirInfo) Mode() os.FileMode  { return 0755 }
							
								
								
									
										103
									
								
								vendor/golang.org/x/tools/go/buildutil/overlay.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										103
									
								
								vendor/golang.org/x/tools/go/buildutil/overlay.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,103 @@ | ||||
| // Copyright 2016 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package buildutil | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"go/build" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"path/filepath" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // OverlayContext overlays a build.Context with additional files from | ||||
| // a map. Files in the map take precedence over other files. | ||||
| // | ||||
| // In addition to plain string comparison, two file names are | ||||
| // considered equal if their base names match and their directory | ||||
| // components point at the same directory on the file system. That is, | ||||
| // symbolic links are followed for directories, but not files. | ||||
| // | ||||
| // A common use case for OverlayContext is to allow editors to pass in | ||||
| // a set of unsaved, modified files. | ||||
| // | ||||
| // Currently, only the Context.OpenFile function will respect the | ||||
| // overlay. This may change in the future. | ||||
| func OverlayContext(orig *build.Context, overlay map[string][]byte) *build.Context { | ||||
| 	// TODO(dominikh): Implement IsDir, HasSubdir and ReadDir | ||||
|  | ||||
| 	rc := func(data []byte) (io.ReadCloser, error) { | ||||
| 		return ioutil.NopCloser(bytes.NewBuffer(data)), nil | ||||
| 	} | ||||
|  | ||||
| 	copy := *orig // make a copy | ||||
| 	ctxt := © | ||||
| 	ctxt.OpenFile = func(path string) (io.ReadCloser, error) { | ||||
| 		// Fast path: names match exactly. | ||||
| 		if content, ok := overlay[path]; ok { | ||||
| 			return rc(content) | ||||
| 		} | ||||
|  | ||||
| 		// Slow path: check for same file under a different | ||||
| 		// alias, perhaps due to a symbolic link. | ||||
| 		for filename, content := range overlay { | ||||
| 			if sameFile(path, filename) { | ||||
| 				return rc(content) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		return OpenFile(orig, path) | ||||
| 	} | ||||
| 	return ctxt | ||||
| } | ||||
|  | ||||
| // ParseOverlayArchive parses an archive containing Go files and their | ||||
| // contents. The result is intended to be used with OverlayContext. | ||||
| // | ||||
| // | ||||
| // Archive format | ||||
| // | ||||
| // The archive consists of a series of files. Each file consists of a | ||||
| // name, a decimal file size and the file contents, separated by | ||||
| // newlines. No newline follows after the file contents. | ||||
func ParseOverlayArchive(archive io.Reader) (map[string][]byte, error) {
	overlay := make(map[string][]byte)
	r := bufio.NewReader(archive)
	for {
		// Read file name (one line). EOF here is the normal
		// end-of-archive condition.
		filename, err := r.ReadString('\n')
		if err != nil {
			if err == io.EOF {
				break // OK
			}
			return nil, fmt.Errorf("reading archive file name: %v", err)
		}
		filename = filepath.Clean(strings.TrimSpace(filename))

		// Read file size (a decimal integer on its own line).
		sz, err := r.ReadString('\n')
		if err != nil {
			return nil, fmt.Errorf("reading size of archive file %s: %v", filename, err)
		}
		sz = strings.TrimSpace(sz)
		size, err := strconv.ParseUint(sz, 10, 32)
		if err != nil {
			return nil, fmt.Errorf("parsing size of archive file %s: %v", filename, err)
		}

		// Read exactly size bytes of file content.
		content := make([]byte, size)
		if _, err := io.ReadFull(r, content); err != nil {
			return nil, fmt.Errorf("reading archive file %s: %v", filename, err)
		}
		overlay[filename] = content
	}

	return overlay, nil
}
							
								
								
									
										75
									
								
								vendor/golang.org/x/tools/go/buildutil/tags.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										75
									
								
								vendor/golang.org/x/tools/go/buildutil/tags.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,75 @@ | ||||
| package buildutil | ||||
|  | ||||
| // This logic was copied from stringsFlag from $GOROOT/src/cmd/go/build.go. | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
// TagsFlagDoc is a usage string suitable for passing to flag.Var
// alongside a TagsFlag value.
const TagsFlagDoc = "a list of `build tags` to consider satisfied during the build. " +
	"For more information about build tags, see the description of " +
	"build constraints in the documentation for the go/build package"
|  | ||||
// TagsFlag is an implementation of the flag.Value and flag.Getter interfaces that parses
// a flag value in the same manner as go build's -tags flag and
// populates a []string slice.
//
// Each call to Set replaces the slice rather than appending to it.
//
// See $GOROOT/src/go/build/doc.go for description of build tags.
// See $GOROOT/src/cmd/go/doc.go for description of 'go build -tags' flag.
//
// Example:
// 	flag.Var((*buildutil.TagsFlag)(&build.Default.BuildTags), "tags", buildutil.TagsFlagDoc)
type TagsFlag []string
|  | ||||
| func (v *TagsFlag) Set(s string) error { | ||||
| 	var err error | ||||
| 	*v, err = splitQuotedFields(s) | ||||
| 	if *v == nil { | ||||
| 		*v = []string{} | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// Get implements flag.Getter; the result has dynamic type []string.
func (v *TagsFlag) Get() interface{} { return *v }
|  | ||||
// splitQuotedFields splits s into whitespace-separated fields, where a
// field may be wrapped in single or double quotes. Quotes further
// inside a field are not special, and no unescaping is performed.
// An unterminated quoted field is an error.
func splitQuotedFields(s string) ([]string, error) {
	// isSpace matches the same ASCII whitespace set as isSpaceByte.
	isSpace := func(c byte) bool {
		return c == ' ' || c == '\t' || c == '\n' || c == '\r'
	}

	var fields []string
	for len(s) > 0 {
		// Skip leading whitespace.
		for len(s) > 0 && isSpace(s[0]) {
			s = s[1:]
		}
		if len(s) == 0 {
			break
		}
		if q := s[0]; q == '"' || q == '\'' {
			// Quoted field: scan for the matching closing quote.
			rest := s[1:]
			end := 0
			for end < len(rest) && rest[end] != q {
				end++
			}
			if end >= len(rest) {
				return nil, fmt.Errorf("unterminated %c string", q)
			}
			fields = append(fields, rest[:end])
			s = rest[end+1:]
			continue
		}
		// Unquoted field: scan to the next whitespace byte.
		end := 0
		for end < len(s) && !isSpace(s[end]) {
			end++
		}
		fields = append(fields, s[:end])
		s = s[end:]
	}
	return fields, nil
}
|  | ||||
// String implements flag.Value, returning a fixed placeholder
// rather than the current tags.
func (v *TagsFlag) String() string {
	return "<tagsFlag>"
}
|  | ||||
// isSpaceByte reports whether c is an ASCII whitespace byte
// (space, tab, newline, or carriage return).
func isSpaceByte(c byte) bool {
	switch c {
	case ' ', '\t', '\n', '\r':
		return true
	}
	return false
}
							
								
								
									
										212
									
								
								vendor/golang.org/x/tools/go/buildutil/util.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										212
									
								
								vendor/golang.org/x/tools/go/buildutil/util.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,212 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package buildutil | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"go/ast" | ||||
| 	"go/build" | ||||
| 	"go/parser" | ||||
| 	"go/token" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"path" | ||||
| 	"path/filepath" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| // ParseFile behaves like parser.ParseFile, | ||||
| // but uses the build context's file system interface, if any. | ||||
| // | ||||
| // If file is not absolute (as defined by IsAbsPath), the (dir, file) | ||||
| // components are joined using JoinPath; dir must be absolute. | ||||
| // | ||||
| // The displayPath function, if provided, is used to transform the | ||||
| // filename that will be attached to the ASTs. | ||||
| // | ||||
| // TODO(adonovan): call this from go/loader.parseFiles when the tree thaws. | ||||
| // | ||||
| func ParseFile(fset *token.FileSet, ctxt *build.Context, displayPath func(string) string, dir string, file string, mode parser.Mode) (*ast.File, error) { | ||||
| 	if !IsAbsPath(ctxt, file) { | ||||
| 		file = JoinPath(ctxt, dir, file) | ||||
| 	} | ||||
| 	rd, err := OpenFile(ctxt, file) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer rd.Close() // ignore error | ||||
| 	if displayPath != nil { | ||||
| 		file = displayPath(file) | ||||
| 	} | ||||
| 	return parser.ParseFile(fset, file, rd, mode) | ||||
| } | ||||
|  | ||||
| // ContainingPackage returns the package containing filename. | ||||
| // | ||||
| // If filename is not absolute, it is interpreted relative to working directory dir. | ||||
| // All I/O is via the build context's file system interface, if any. | ||||
| // | ||||
| // The '...Files []string' fields of the resulting build.Package are not | ||||
| // populated (build.FindOnly mode). | ||||
| // | ||||
| func ContainingPackage(ctxt *build.Context, dir, filename string) (*build.Package, error) { | ||||
| 	if !IsAbsPath(ctxt, filename) { | ||||
| 		filename = JoinPath(ctxt, dir, filename) | ||||
| 	} | ||||
|  | ||||
| 	// We must not assume the file tree uses | ||||
| 	// "/" always, | ||||
| 	// `\` always, | ||||
| 	// or os.PathSeparator (which varies by platform), | ||||
| 	// but to make any progress, we are forced to assume that | ||||
| 	// paths will not use `\` unless the PathSeparator | ||||
| 	// is also `\`, thus we can rely on filepath.ToSlash for some sanity. | ||||
|  | ||||
| 	dirSlash := path.Dir(filepath.ToSlash(filename)) + "/" | ||||
|  | ||||
| 	// We assume that no source root (GOPATH[i] or GOROOT) contains any other. | ||||
| 	for _, srcdir := range ctxt.SrcDirs() { | ||||
| 		srcdirSlash := filepath.ToSlash(srcdir) + "/" | ||||
| 		if importPath, ok := HasSubdir(ctxt, srcdirSlash, dirSlash); ok { | ||||
| 			return ctxt.Import(importPath, dir, build.FindOnly) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil, fmt.Errorf("can't find package containing %s", filename) | ||||
| } | ||||
|  | ||||
| // -- Effective methods of file system interface ------------------------- | ||||
|  | ||||
| // (go/build.Context defines these as methods, but does not export them.) | ||||
|  | ||||
// HasSubdir reports whether dir is a subdirectory of root, calling
// ctxt.HasSubdir if non-nil, or else answering via the local file
// system (trying symlink-expanded variants as well). On success it
// also returns dir's slash-separated path relative to root.
func HasSubdir(ctxt *build.Context, root, dir string) (rel string, ok bool) {
	if f := ctxt.HasSubdir; f != nil {
		return f(root, dir)
	}

	// Try using paths we received.
	if rel, ok = hasSubdir(root, dir); ok {
		return
	}

	// Try expanding symlinks and comparing
	// expanded against unexpanded and
	// expanded against expanded.
	rootSym, _ := filepath.EvalSymlinks(root) // on error the empty result simply fails the prefix test
	dirSym, _ := filepath.EvalSymlinks(dir)

	if rel, ok = hasSubdir(rootSym, dir); ok {
		return
	}
	if rel, ok = hasSubdir(root, dirSym); ok {
		return
	}
	return hasSubdir(rootSym, dirSym)
}
|  | ||||
// hasSubdir is the purely lexical fallback for HasSubdir: it cleans
// both paths and tests whether dir lies under root, returning the
// slash-separated remainder when it does.
func hasSubdir(root, dir string) (rel string, ok bool) {
	sep := string(filepath.Separator)
	prefix := filepath.Clean(root)
	if !strings.HasSuffix(prefix, sep) {
		prefix += sep
	}

	cleaned := filepath.Clean(dir)
	if strings.HasPrefix(cleaned, prefix) {
		return filepath.ToSlash(cleaned[len(prefix):]), true
	}
	return "", false
}
|  | ||||
| // FileExists returns true if the specified file exists, | ||||
| // using the build context's file system interface. | ||||
| func FileExists(ctxt *build.Context, path string) bool { | ||||
| 	if ctxt.OpenFile != nil { | ||||
| 		r, err := ctxt.OpenFile(path) | ||||
| 		if err != nil { | ||||
| 			return false | ||||
| 		} | ||||
| 		r.Close() // ignore error | ||||
| 		return true | ||||
| 	} | ||||
| 	_, err := os.Stat(path) | ||||
| 	return err == nil | ||||
| } | ||||
|  | ||||
| // OpenFile behaves like os.Open, | ||||
| // but uses the build context's file system interface, if any. | ||||
| func OpenFile(ctxt *build.Context, path string) (io.ReadCloser, error) { | ||||
| 	if ctxt.OpenFile != nil { | ||||
| 		return ctxt.OpenFile(path) | ||||
| 	} | ||||
| 	return os.Open(path) | ||||
| } | ||||
|  | ||||
| // IsAbsPath behaves like filepath.IsAbs, | ||||
| // but uses the build context's file system interface, if any. | ||||
| func IsAbsPath(ctxt *build.Context, path string) bool { | ||||
| 	if ctxt.IsAbsPath != nil { | ||||
| 		return ctxt.IsAbsPath(path) | ||||
| 	} | ||||
| 	return filepath.IsAbs(path) | ||||
| } | ||||
|  | ||||
| // JoinPath behaves like filepath.Join, | ||||
| // but uses the build context's file system interface, if any. | ||||
| func JoinPath(ctxt *build.Context, path ...string) string { | ||||
| 	if ctxt.JoinPath != nil { | ||||
| 		return ctxt.JoinPath(path...) | ||||
| 	} | ||||
| 	return filepath.Join(path...) | ||||
| } | ||||
|  | ||||
| // IsDir behaves like os.Stat plus IsDir, | ||||
| // but uses the build context's file system interface, if any. | ||||
| func IsDir(ctxt *build.Context, path string) bool { | ||||
| 	if ctxt.IsDir != nil { | ||||
| 		return ctxt.IsDir(path) | ||||
| 	} | ||||
| 	fi, err := os.Stat(path) | ||||
| 	return err == nil && fi.IsDir() | ||||
| } | ||||
|  | ||||
| // ReadDir behaves like ioutil.ReadDir, | ||||
| // but uses the build context's file system interface, if any. | ||||
| func ReadDir(ctxt *build.Context, path string) ([]os.FileInfo, error) { | ||||
| 	if ctxt.ReadDir != nil { | ||||
| 		return ctxt.ReadDir(path) | ||||
| 	} | ||||
| 	return ioutil.ReadDir(path) | ||||
| } | ||||
|  | ||||
| // SplitPathList behaves like filepath.SplitList, | ||||
| // but uses the build context's file system interface, if any. | ||||
| func SplitPathList(ctxt *build.Context, s string) []string { | ||||
| 	if ctxt.SplitPathList != nil { | ||||
| 		return ctxt.SplitPathList(s) | ||||
| 	} | ||||
| 	return filepath.SplitList(s) | ||||
| } | ||||
|  | ||||
// sameFile reports whether x and y denote the same file: either their
// cleaned paths are equal, or — as a cheap pre-filter — their basenames
// match and os.SameFile says their stat results coincide.
//
func sameFile(x, y string) bool {
	if path.Clean(x) == path.Clean(y) {
		return true
	}
	if filepath.Base(x) != filepath.Base(y) { // (optimisation)
		return false
	}
	xi, err := os.Stat(x)
	if err != nil {
		return false
	}
	yi, err := os.Stat(y)
	if err != nil {
		return false
	}
	return os.SameFile(xi, yi)
}
							
								
								
									
										23
									
								
								vendor/golang.org/x/tools/go/types/objectpath/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								vendor/golang.org/x/tools/go/types/objectpath/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,23 @@ | ||||
# NOTE(review): Bazel rules for a vendored package; the "automanaged"
# tags indicate this file is maintained by tooling — do not edit by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["objectpath.go"],
    importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/types/objectpath",
    importpath = "golang.org/x/tools/go/types/objectpath",
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										523
									
								
								vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										523
									
								
								vendor/golang.org/x/tools/go/types/objectpath/objectpath.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,523 @@ | ||||
| // Copyright 2018 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package objectpath defines a naming scheme for types.Objects | ||||
| // (that is, named entities in Go programs) relative to their enclosing | ||||
| // package. | ||||
| // | ||||
| // Type-checker objects are canonical, so they are usually identified by | ||||
| // their address in memory (a pointer), but a pointer has meaning only | ||||
| // within one address space. By contrast, objectpath names allow the | ||||
| // identity of an object to be sent from one program to another, | ||||
| // establishing a correspondence between types.Object variables that are | ||||
| // distinct but logically equivalent. | ||||
| // | ||||
| // A single object may have multiple paths. In this example, | ||||
| //     type A struct{ X int } | ||||
| //     type B A | ||||
| // the field X has two paths due to its membership of both A and B. | ||||
| // The For(obj) function always returns one of these paths, arbitrarily | ||||
| // but consistently. | ||||
| package objectpath | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
|  | ||||
| 	"go/types" | ||||
| ) | ||||
|  | ||||
// A Path is an opaque name that identifies a types.Object
// relative to its package. Conceptually, the name consists of a
// sequence of destructuring operations applied to the package scope
// to obtain the original object.
// The name does not include the package itself.
type Path string

// Encoding
//
// An object path is a textual and (with training) human-readable encoding
// of a sequence of destructuring operators, starting from a types.Package.
// The sequences represent a path through the package/object/type graph.
// We classify these operators by their type:
//
//   PO package->object	Package.Scope.Lookup
//   OT  object->type 	Object.Type
//   TT    type->type 	Type.{Elem,Key,Params,Results,Underlying} [EKPRU]
//   TO   type->object	Type.{At,Field,Method,Obj} [AFMO]
//
// All valid paths start with a package and end at an object
// and thus may be defined by the regular language:
//
//   objectpath = PO (OT TT* TO)*
//
// The concrete encoding follows directly:
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
// - The only OT operator is Object.Type,
//   which we encode as '.' because dot cannot appear in an identifier.
// - The TT operators are encoded as [EKPRU].
// - The TO operators are encoded as [AFMO];
//   three of these (At,Field,Method) require an integer operand,
//   which is encoded as a string of decimal digits.
//   These indices are stable across different representations
//   of the same package, even source and export data.
//
// In the example below,
//
//	package p
//
//	type T interface {
//		f() (a string, b struct{ X int })
//	}
//
// field X has the path "T.UM0.RA1.F0",
// representing the following sequence of operations:
//
//    p.Lookup("T")					T
//    .Type().Underlying().Method(0).			f
//    .Type().Results().At(1)				b
//    .Type().Field(0)					X
//
// The encoding is not maximally compact---every R or P is
// followed by an A, for example---but this simplifies the
// encoder and decoder.
//
const (
	// object->type operators
	opType = '.' // .Type()		  (Object)

	// type->type operators
	opElem       = 'E' // .Elem()		(Pointer, Slice, Array, Chan, Map)
	opKey        = 'K' // .Key()		(Map)
	opParams     = 'P' // .Params()		(Signature)
	opResults    = 'R' // .Results()	(Signature)
	opUnderlying = 'U' // .Underlying()	(Named)

	// type->object operators
	opAt     = 'A' // .At(i)		(Tuple)
	opField  = 'F' // .Field(i)		(Struct)
	opMethod = 'M' // .Method(i)		(Named or Interface; not Struct: "promoted" names are ignored)
	opObj    = 'O' // .Obj()		(Named)
)
|  | ||||
// The For function returns the path to an object relative to its package,
// or an error if the object is not accessible from the package's Scope.
//
// The For function guarantees to return a path only for the following objects:
// - package-level types
// - exported package-level non-types
// - methods
// - parameter and result variables
// - struct fields
// These objects are sufficient to define the API of their package.
// The objects described by a package's export data are drawn from this set.
//
// For does not return a path for predeclared names, imported package
// names, local names, and unexported package-level names (except
// types).
//
// Example: given this definition,
//
//	package p
//
//	type T interface {
//		f() (a string, b struct{ X int })
//	}
//
// For(X) would return a path that denotes the following sequence of operations:
//
//    p.Scope().Lookup("T")				(TypeName T)
//    .Type().Underlying().Method(0).			(method Func f)
//    .Type().Results().At(1)				(field Var b)
//    .Type().Field(0)					(field Var X)
//
// where p is the package (*types.Package) to which X belongs.
func For(obj types.Object) (Path, error) {
	pkg := obj.Pkg()

	// This table lists the cases of interest.
	//
	// Object				Action
	// ------                               ------
	// nil					reject
	// builtin				reject
	// pkgname				reject
	// label				reject
	// var
	//    package-level			accept
	//    func param/result			accept
	//    local				reject
	//    struct field			accept
	// const
	//    package-level			accept
	//    local				reject
	// func
	//    package-level			accept
	//    init functions			reject
	//    concrete method			accept
	//    interface method			accept
	// type
	//    package-level			accept
	//    local				reject
	//
	// The only accessible package-level objects are members of pkg itself.
	//
	// The cases are handled in four steps:
	//
	// 1. reject nil and builtin
	// 2. accept package-level objects
	// 3. reject obviously invalid objects
	// 4. search the API for the path to the param/result/field/method.

	// 1. reference to nil or builtin?
	if pkg == nil {
		return "", fmt.Errorf("predeclared %s has no path", obj)
	}
	scope := pkg.Scope()

	// 2. package-level object?
	if scope.Lookup(obj.Name()) == obj {
		// Only exported objects (and non-exported types) have a path.
		// Non-exported types may be referenced by other objects.
		if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
			return "", fmt.Errorf("no path for non-exported %v", obj)
		}
		return Path(obj.Name()), nil
	}

	// 3. Not a package-level object.
	//    Reject obviously non-viable cases.
	switch obj := obj.(type) {
	case *types.Const, // Only package-level constants have a path.
		*types.TypeName, // Only package-level types have a path.
		*types.Label,    // Labels are function-local.
		*types.PkgName:  // PkgNames are file-local.
		return "", fmt.Errorf("no path for %v", obj)

	case *types.Var:
		// Could be:
		// - a field (obj.IsField())
		// - a func parameter or result
		// - a local var.
		// Sadly there is no way to distinguish
		// a param/result from a local
		// so we must proceed to the find.

	case *types.Func:
		// A func, if not package-level, must be a method.
		if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
			return "", fmt.Errorf("func is not a method: %v", obj)
		}
		// TODO(adonovan): opt: if the method is concrete,
		// do a specialized version of the rest of this function so
		// that it's O(1) not O(|scope|).  Basically 'find' is needed
		// only for struct fields and interface methods.

	default:
		panic(obj)
	}

	// 4. Search the API for the path to the var (field/param/result) or method.

	// First inspect package-level named types.
	// In the presence of path aliases, these give
	// the best paths because non-types may
	// refer to types, but not the reverse.
	// empty is a scratch buffer; each candidate path below is built by
	// appending to it afresh.
	empty := make([]byte, 0, 48) // initial space
	for _, name := range scope.Names() {
		o := scope.Lookup(name)
		tname, ok := o.(*types.TypeName)
		if !ok {
			continue // handle non-types in second pass
		}

		path := append(empty, name...)
		path = append(path, opType)

		T := o.Type()

		if tname.IsAlias() {
			// type alias
			if r := find(obj, T, path); r != nil {
				return Path(r), nil
			}
		} else {
			// defined (named) type
			if r := find(obj, T.Underlying(), append(path, opUnderlying)); r != nil {
				return Path(r), nil
			}
		}
	}

	// Then inspect everything else:
	// non-types, and declared methods of defined types.
	for _, name := range scope.Names() {
		o := scope.Lookup(name)
		path := append(empty, name...)
		if _, ok := o.(*types.TypeName); !ok {
			if o.Exported() {
				// exported non-type (const, var, func)
				if r := find(obj, o.Type(), append(path, opType)); r != nil {
					return Path(r), nil
				}
			}
			continue
		}

		// Inspect declared methods of defined types.
		if T, ok := o.Type().(*types.Named); ok {
			path = append(path, opType)
			for i := 0; i < T.NumMethods(); i++ {
				m := T.Method(i)
				path2 := appendOpArg(path, opMethod, i)
				if m == obj {
					return Path(path2), nil // found declared method
				}
				if r := find(obj, m.Type(), append(path2, opType)); r != nil {
					return Path(r), nil
				}
			}
		}
	}

	return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
}
|  | ||||
// appendOpArg appends operator op followed by its decimal-encoded
// integer operand arg to path.
func appendOpArg(path []byte, op byte, arg int) []byte {
	return strconv.AppendInt(append(path, op), int64(arg), 10)
}
|  | ||||
// find finds obj within type T, returning the path to it, or nil if not found.
//
// It is a depth-first search over T's structure: path holds the encoded
// operator prefix that reaches T, and each recursive call extends it
// with the operator leading to the child type being searched.
func find(obj types.Object, T types.Type, path []byte) []byte {
	switch T := T.(type) {
	case *types.Basic, *types.Named:
		// Named types belonging to pkg were handled already,
		// so T must belong to another package. No path.
		return nil
	case *types.Pointer:
		return find(obj, T.Elem(), append(path, opElem))
	case *types.Slice:
		return find(obj, T.Elem(), append(path, opElem))
	case *types.Array:
		return find(obj, T.Elem(), append(path, opElem))
	case *types.Chan:
		return find(obj, T.Elem(), append(path, opElem))
	case *types.Map:
		if r := find(obj, T.Key(), append(path, opKey)); r != nil {
			return r
		}
		return find(obj, T.Elem(), append(path, opElem))
	case *types.Signature:
		if r := find(obj, T.Params(), append(path, opParams)); r != nil {
			return r
		}
		return find(obj, T.Results(), append(path, opResults))
	case *types.Struct:
		for i := 0; i < T.NumFields(); i++ {
			f := T.Field(i)
			path2 := appendOpArg(path, opField, i)
			if f == obj {
				return path2 // found field var
			}
			if r := find(obj, f.Type(), append(path2, opType)); r != nil {
				return r
			}
		}
		return nil
	case *types.Tuple:
		for i := 0; i < T.Len(); i++ {
			v := T.At(i)
			path2 := appendOpArg(path, opAt, i)
			if v == obj {
				return path2 // found param/result var
			}
			if r := find(obj, v.Type(), append(path2, opType)); r != nil {
				return r
			}
		}
		return nil
	case *types.Interface:
		for i := 0; i < T.NumMethods(); i++ {
			m := T.Method(i)
			path2 := appendOpArg(path, opMethod, i)
			if m == obj {
				return path2 // found interface method
			}
			if r := find(obj, m.Type(), append(path2, opType)); r != nil {
				return r
			}
		}
		return nil
	}
	panic(T) // any other type kind is unexpected here
}
|  | ||||
// Object returns the object denoted by path p within the package pkg.
// It is the decoding counterpart of For: an error is returned if the
// path is syntactically invalid or denotes nothing in pkg.
func Object(pkg *types.Package, p Path) (types.Object, error) {
	if p == "" {
		return nil, fmt.Errorf("empty path")
	}

	// Split off the leading package-scope identifier; the remainder
	// (if any) begins with the opType '.' operator.
	pathstr := string(p)
	var pkgobj, suffix string
	if dot := strings.IndexByte(pathstr, opType); dot < 0 {
		pkgobj = pathstr
	} else {
		pkgobj = pathstr[:dot]
		suffix = pathstr[dot:] // suffix starts with "."
	}

	obj := pkg.Scope().Lookup(pkgobj)
	if obj == nil {
		return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
	}

	// abstraction of *types.{Pointer,Slice,Array,Chan,Map}
	type hasElem interface {
		Elem() types.Type
	}
	// abstraction of *types.{Interface,Named}
	type hasMethods interface {
		Method(int) *types.Func
		NumMethods() int
	}

	// The loop state is the pair (t, obj),
	// exactly one of which is non-nil, initially obj.
	// All suffixes start with '.' (the only object->type operation),
	// followed by optional type->type operations,
	// then a type->object operation.
	// The cycle then repeats.
	var t types.Type
	for suffix != "" {
		code := suffix[0]
		suffix = suffix[1:]

		// Codes [AFM] have an integer operand.
		var index int
		switch code {
		case opAt, opField, opMethod:
			rest := strings.TrimLeft(suffix, "0123456789")
			numerals := suffix[:len(suffix)-len(rest)]
			suffix = rest
			i, err := strconv.Atoi(numerals)
			if err != nil {
				return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
			}
			index = int(i)
		case opObj:
			// no operand
		default:
			// The suffix must end with a type->object operation.
			if suffix == "" {
				return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
			}
		}

		if code == opType {
			if t != nil {
				return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
			}
			t = obj.Type()
			obj = nil
			continue
		}

		if t == nil {
			return nil, fmt.Errorf("invalid path: code %q in object context", code)
		}

		// Inv: t != nil, obj == nil

		switch code {
		case opElem:
			hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
			if !ok {
				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
			}
			t = hasElem.Elem()

		case opKey:
			mapType, ok := t.(*types.Map)
			if !ok {
				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
			}
			t = mapType.Key()

		case opParams:
			sig, ok := t.(*types.Signature)
			if !ok {
				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
			}
			t = sig.Params()

		case opResults:
			sig, ok := t.(*types.Signature)
			if !ok {
				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
			}
			t = sig.Results()

		case opUnderlying:
			named, ok := t.(*types.Named)
			if !ok {
				return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
			}
			t = named.Underlying()

		case opAt:
			tuple, ok := t.(*types.Tuple)
			if !ok {
				return nil, fmt.Errorf("cannot apply %q to %s (got %s, want tuple)", code, t, t)
			}
			if n := tuple.Len(); index >= n {
				return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
			}
			obj = tuple.At(index)
			t = nil

		case opField:
			structType, ok := t.(*types.Struct)
			if !ok {
				return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
			}
			if n := structType.NumFields(); index >= n {
				return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
			}
			obj = structType.Field(index)
			t = nil

		case opMethod:
			hasMethods, ok := t.(hasMethods) // Interface or Named
			if !ok {
				return nil, fmt.Errorf("cannot apply %q to %s (got %s, want interface or named)", code, t, t)
			}
			if n := hasMethods.NumMethods(); index >= n {
				return nil, fmt.Errorf("method index %d out of range [0-%d)", index, n)
			}
			obj = hasMethods.Method(index)
			t = nil

		case opObj:
			named, ok := t.(*types.Named)
			if !ok {
				return nil, fmt.Errorf("cannot apply %q to %s (got %s, want named)", code, t, t)
			}
			obj = named.Obj()
			t = nil

		default:
			return nil, fmt.Errorf("invalid path: unknown code %q", code)
		}
	}

	if obj.Pkg() != pkg {
		return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
	}

	return obj, nil // success
}
							
								
								
									
										30
									
								
								vendor/golang.org/x/tools/go/types/typeutil/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										30
									
								
								vendor/golang.org/x/tools/go/types/typeutil/BUILD
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,30 @@ | ||||
# NOTE(review): Bazel rules for a vendored package; the "automanaged"
# tags indicate this file is maintained by tooling — do not edit by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "callee.go",
        "imports.go",
        "map.go",
        "methodsetcache.go",
        "ui.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/golang.org/x/tools/go/types/typeutil",
    importpath = "golang.org/x/tools/go/types/typeutil",
    visibility = ["//visibility:public"],
    deps = ["//vendor/golang.org/x/tools/go/ast/astutil:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										46
									
								
								vendor/golang.org/x/tools/go/types/typeutil/callee.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										46
									
								
								vendor/golang.org/x/tools/go/types/typeutil/callee.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,46 @@ | ||||
| // Copyright 2018 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package typeutil | ||||
|  | ||||
| import ( | ||||
| 	"go/ast" | ||||
| 	"go/types" | ||||
|  | ||||
| 	"golang.org/x/tools/go/ast/astutil" | ||||
| ) | ||||
|  | ||||
| // Callee returns the named target of a function call, if any: | ||||
| // a function, method, builtin, or variable. | ||||
| func Callee(info *types.Info, call *ast.CallExpr) types.Object { | ||||
| 	var obj types.Object | ||||
| 	switch fun := astutil.Unparen(call.Fun).(type) { | ||||
| 	case *ast.Ident: | ||||
| 		obj = info.Uses[fun] // type, var, builtin, or declared func | ||||
| 	case *ast.SelectorExpr: | ||||
| 		if sel, ok := info.Selections[fun]; ok { | ||||
| 			obj = sel.Obj() // method or field | ||||
| 		} else { | ||||
| 			obj = info.Uses[fun.Sel] // qualified identifier? | ||||
| 		} | ||||
| 	} | ||||
| 	if _, ok := obj.(*types.TypeName); ok { | ||||
| 		return nil // T(x) is a conversion, not a call | ||||
| 	} | ||||
| 	return obj | ||||
| } | ||||
|  | ||||
| // StaticCallee returns the target (function or method) of a static | ||||
| // function call, if any. It returns nil for calls to builtins. | ||||
| func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { | ||||
| 	if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { | ||||
| 		return f | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func interfaceMethod(f *types.Func) bool { | ||||
| 	recv := f.Type().(*types.Signature).Recv() | ||||
| 	return recv != nil && types.IsInterface(recv.Type()) | ||||
| } | ||||
							
								
								
									
										31
									
								
								vendor/golang.org/x/tools/go/types/typeutil/imports.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										31
									
								
								vendor/golang.org/x/tools/go/types/typeutil/imports.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,31 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package typeutil | ||||
|  | ||||
| import "go/types" | ||||
|  | ||||
| // Dependencies returns all dependencies of the specified packages. | ||||
| // | ||||
| // Dependent packages appear in topological order: if package P imports | ||||
| // package Q, Q appears earlier than P in the result. | ||||
| // The algorithm follows import statements in the order they | ||||
| // appear in the source code, so the result is a total order. | ||||
| // | ||||
| func Dependencies(pkgs ...*types.Package) []*types.Package { | ||||
| 	var result []*types.Package | ||||
| 	seen := make(map[*types.Package]bool) | ||||
| 	var visit func(pkgs []*types.Package) | ||||
| 	visit = func(pkgs []*types.Package) { | ||||
| 		for _, p := range pkgs { | ||||
| 			if !seen[p] { | ||||
| 				seen[p] = true | ||||
| 				visit(p.Imports()) | ||||
| 				result = append(result, p) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	visit(pkgs) | ||||
| 	return result | ||||
| } | ||||
							
								
								
									
										313
									
								
								vendor/golang.org/x/tools/go/types/typeutil/map.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										313
									
								
								vendor/golang.org/x/tools/go/types/typeutil/map.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,313 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package typeutil defines various utilities for types, such as Map, | ||||
| // a mapping from types.Type to interface{} values. | ||||
| package typeutil // import "golang.org/x/tools/go/types/typeutil" | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"go/types" | ||||
| 	"reflect" | ||||
| ) | ||||
|  | ||||
| // Map is a hash-table-based mapping from types (types.Type) to | ||||
| // arbitrary interface{} values.  The concrete types that implement | ||||
| // the Type interface are pointers.  Since they are not canonicalized, | ||||
| // == cannot be used to check for equivalence, and thus we cannot | ||||
| // simply use a Go map. | ||||
| // | ||||
| // Just as with map[K]V, a nil *Map is a valid empty map. | ||||
| // | ||||
| // Not thread-safe. | ||||
| // | ||||
| type Map struct { | ||||
| 	hasher Hasher             // shared by many Maps | ||||
| 	table  map[uint32][]entry // maps hash to bucket; entry.key==nil means unused | ||||
| 	length int                // number of map entries | ||||
| } | ||||
|  | ||||
| // entry is an entry (key/value association) in a hash bucket. | ||||
| type entry struct { | ||||
| 	key   types.Type | ||||
| 	value interface{} | ||||
| } | ||||
|  | ||||
| // SetHasher sets the hasher used by Map. | ||||
| // | ||||
| // All Hashers are functionally equivalent but contain internal state | ||||
| // used to cache the results of hashing previously seen types. | ||||
| // | ||||
| // A single Hasher created by MakeHasher() may be shared among many | ||||
| // Maps.  This is recommended if the instances have many keys in | ||||
| // common, as it will amortize the cost of hash computation. | ||||
| // | ||||
| // A Hasher may grow without bound as new types are seen.  Even when a | ||||
| // type is deleted from the map, the Hasher never shrinks, since other | ||||
| // types in the map may reference the deleted type indirectly. | ||||
| // | ||||
| // Hashers are not thread-safe, and read-only operations such as | ||||
| // Map.Lookup require updates to the hasher, so a full Mutex lock (not a | ||||
| // read-lock) is require around all Map operations if a shared | ||||
| // hasher is accessed from multiple threads. | ||||
| // | ||||
| // If SetHasher is not called, the Map will create a private hasher at | ||||
| // the first call to Insert. | ||||
| // | ||||
| func (m *Map) SetHasher(hasher Hasher) { | ||||
| 	m.hasher = hasher | ||||
| } | ||||
|  | ||||
| // Delete removes the entry with the given key, if any. | ||||
| // It returns true if the entry was found. | ||||
| // | ||||
| func (m *Map) Delete(key types.Type) bool { | ||||
| 	if m != nil && m.table != nil { | ||||
| 		hash := m.hasher.Hash(key) | ||||
| 		bucket := m.table[hash] | ||||
| 		for i, e := range bucket { | ||||
| 			if e.key != nil && types.Identical(key, e.key) { | ||||
| 				// We can't compact the bucket as it | ||||
| 				// would disturb iterators. | ||||
| 				bucket[i] = entry{} | ||||
| 				m.length-- | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // At returns the map entry for the given key. | ||||
| // The result is nil if the entry is not present. | ||||
| // | ||||
| func (m *Map) At(key types.Type) interface{} { | ||||
| 	if m != nil && m.table != nil { | ||||
| 		for _, e := range m.table[m.hasher.Hash(key)] { | ||||
| 			if e.key != nil && types.Identical(key, e.key) { | ||||
| 				return e.value | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Set sets the map entry for key to val, | ||||
| // and returns the previous entry, if any. | ||||
| func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { | ||||
| 	if m.table != nil { | ||||
| 		hash := m.hasher.Hash(key) | ||||
| 		bucket := m.table[hash] | ||||
| 		var hole *entry | ||||
| 		for i, e := range bucket { | ||||
| 			if e.key == nil { | ||||
| 				hole = &bucket[i] | ||||
| 			} else if types.Identical(key, e.key) { | ||||
| 				prev = e.value | ||||
| 				bucket[i].value = value | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if hole != nil { | ||||
| 			*hole = entry{key, value} // overwrite deleted entry | ||||
| 		} else { | ||||
| 			m.table[hash] = append(bucket, entry{key, value}) | ||||
| 		} | ||||
| 	} else { | ||||
| 		if m.hasher.memo == nil { | ||||
| 			m.hasher = MakeHasher() | ||||
| 		} | ||||
| 		hash := m.hasher.Hash(key) | ||||
| 		m.table = map[uint32][]entry{hash: {entry{key, value}}} | ||||
| 	} | ||||
|  | ||||
| 	m.length++ | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Len returns the number of map entries. | ||||
| func (m *Map) Len() int { | ||||
| 	if m != nil { | ||||
| 		return m.length | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| // Iterate calls function f on each entry in the map in unspecified order. | ||||
| // | ||||
| // If f should mutate the map, Iterate provides the same guarantees as | ||||
| // Go maps: if f deletes a map entry that Iterate has not yet reached, | ||||
| // f will not be invoked for it, but if f inserts a map entry that | ||||
| // Iterate has not yet reached, whether or not f will be invoked for | ||||
| // it is unspecified. | ||||
| // | ||||
| func (m *Map) Iterate(f func(key types.Type, value interface{})) { | ||||
| 	if m != nil { | ||||
| 		for _, bucket := range m.table { | ||||
| 			for _, e := range bucket { | ||||
| 				if e.key != nil { | ||||
| 					f(e.key, e.value) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Keys returns a new slice containing the set of map keys. | ||||
| // The order is unspecified. | ||||
| func (m *Map) Keys() []types.Type { | ||||
| 	keys := make([]types.Type, 0, m.Len()) | ||||
| 	m.Iterate(func(key types.Type, _ interface{}) { | ||||
| 		keys = append(keys, key) | ||||
| 	}) | ||||
| 	return keys | ||||
| } | ||||
|  | ||||
| func (m *Map) toString(values bool) string { | ||||
| 	if m == nil { | ||||
| 		return "{}" | ||||
| 	} | ||||
| 	var buf bytes.Buffer | ||||
| 	fmt.Fprint(&buf, "{") | ||||
| 	sep := "" | ||||
| 	m.Iterate(func(key types.Type, value interface{}) { | ||||
| 		fmt.Fprint(&buf, sep) | ||||
| 		sep = ", " | ||||
| 		fmt.Fprint(&buf, key) | ||||
| 		if values { | ||||
| 			fmt.Fprintf(&buf, ": %q", value) | ||||
| 		} | ||||
| 	}) | ||||
| 	fmt.Fprint(&buf, "}") | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
| // String returns a string representation of the map's entries. | ||||
| // Values are printed using fmt.Sprintf("%v", v). | ||||
| // Order is unspecified. | ||||
| // | ||||
| func (m *Map) String() string { | ||||
| 	return m.toString(true) | ||||
| } | ||||
|  | ||||
| // KeysString returns a string representation of the map's key set. | ||||
| // Order is unspecified. | ||||
| // | ||||
| func (m *Map) KeysString() string { | ||||
| 	return m.toString(false) | ||||
| } | ||||
|  | ||||
| //////////////////////////////////////////////////////////////////////// | ||||
| // Hasher | ||||
|  | ||||
| // A Hasher maps each type to its hash value. | ||||
| // For efficiency, a hasher uses memoization; thus its memory | ||||
| // footprint grows monotonically over time. | ||||
| // Hashers are not thread-safe. | ||||
| // Hashers have reference semantics. | ||||
| // Call MakeHasher to create a Hasher. | ||||
| type Hasher struct { | ||||
| 	memo map[types.Type]uint32 | ||||
| } | ||||
|  | ||||
| // MakeHasher returns a new Hasher instance. | ||||
| func MakeHasher() Hasher { | ||||
| 	return Hasher{make(map[types.Type]uint32)} | ||||
| } | ||||
|  | ||||
| // Hash computes a hash value for the given type t such that | ||||
| // Identical(t, t') => Hash(t) == Hash(t'). | ||||
| func (h Hasher) Hash(t types.Type) uint32 { | ||||
| 	hash, ok := h.memo[t] | ||||
| 	if !ok { | ||||
| 		hash = h.hashFor(t) | ||||
| 		h.memo[t] = hash | ||||
| 	} | ||||
| 	return hash | ||||
| } | ||||
|  | ||||
| // hashString computes the Fowler–Noll–Vo hash of s. | ||||
| func hashString(s string) uint32 { | ||||
| 	var h uint32 | ||||
| 	for i := 0; i < len(s); i++ { | ||||
| 		h ^= uint32(s[i]) | ||||
| 		h *= 16777619 | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| // hashFor computes the hash of t. | ||||
| func (h Hasher) hashFor(t types.Type) uint32 { | ||||
| 	// See Identical for rationale. | ||||
| 	switch t := t.(type) { | ||||
| 	case *types.Basic: | ||||
| 		return uint32(t.Kind()) | ||||
|  | ||||
| 	case *types.Array: | ||||
| 		return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) | ||||
|  | ||||
| 	case *types.Slice: | ||||
| 		return 9049 + 2*h.Hash(t.Elem()) | ||||
|  | ||||
| 	case *types.Struct: | ||||
| 		var hash uint32 = 9059 | ||||
| 		for i, n := 0, t.NumFields(); i < n; i++ { | ||||
| 			f := t.Field(i) | ||||
| 			if f.Anonymous() { | ||||
| 				hash += 8861 | ||||
| 			} | ||||
| 			hash += hashString(t.Tag(i)) | ||||
| 			hash += hashString(f.Name()) // (ignore f.Pkg) | ||||
| 			hash += h.Hash(f.Type()) | ||||
| 		} | ||||
| 		return hash | ||||
|  | ||||
| 	case *types.Pointer: | ||||
| 		return 9067 + 2*h.Hash(t.Elem()) | ||||
|  | ||||
| 	case *types.Signature: | ||||
| 		var hash uint32 = 9091 | ||||
| 		if t.Variadic() { | ||||
| 			hash *= 8863 | ||||
| 		} | ||||
| 		return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) | ||||
|  | ||||
| 	case *types.Interface: | ||||
| 		var hash uint32 = 9103 | ||||
| 		for i, n := 0, t.NumMethods(); i < n; i++ { | ||||
| 			// See go/types.identicalMethods for rationale. | ||||
| 			// Method order is not significant. | ||||
| 			// Ignore m.Pkg(). | ||||
| 			m := t.Method(i) | ||||
| 			hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type()) | ||||
| 		} | ||||
| 		return hash | ||||
|  | ||||
| 	case *types.Map: | ||||
| 		return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) | ||||
|  | ||||
| 	case *types.Chan: | ||||
| 		return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) | ||||
|  | ||||
| 	case *types.Named: | ||||
| 		// Not safe with a copying GC; objects may move. | ||||
| 		return uint32(reflect.ValueOf(t.Obj()).Pointer()) | ||||
|  | ||||
| 	case *types.Tuple: | ||||
| 		return h.hashTuple(t) | ||||
| 	} | ||||
| 	panic(t) | ||||
| } | ||||
|  | ||||
| func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { | ||||
| 	// See go/types.identicalTypes for rationale. | ||||
| 	n := tuple.Len() | ||||
| 	var hash uint32 = 9137 + 2*uint32(n) | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		hash += 3 * h.Hash(tuple.At(i).Type()) | ||||
| 	} | ||||
| 	return hash | ||||
| } | ||||
							
								
								
									
										72
									
								
								vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										72
									
								
								vendor/golang.org/x/tools/go/types/typeutil/methodsetcache.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,72 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // This file implements a cache of method sets. | ||||
|  | ||||
| package typeutil | ||||
|  | ||||
| import ( | ||||
| 	"go/types" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // A MethodSetCache records the method set of each type T for which | ||||
| // MethodSet(T) is called so that repeat queries are fast. | ||||
| // The zero value is a ready-to-use cache instance. | ||||
| type MethodSetCache struct { | ||||
| 	mu     sync.Mutex | ||||
| 	named  map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N | ||||
| 	others map[types.Type]*types.MethodSet                            // all other types | ||||
| } | ||||
|  | ||||
| // MethodSet returns the method set of type T.  It is thread-safe. | ||||
| // | ||||
| // If cache is nil, this function is equivalent to types.NewMethodSet(T). | ||||
| // Utility functions can thus expose an optional *MethodSetCache | ||||
| // parameter to clients that care about performance. | ||||
| // | ||||
| func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { | ||||
| 	if cache == nil { | ||||
| 		return types.NewMethodSet(T) | ||||
| 	} | ||||
| 	cache.mu.Lock() | ||||
| 	defer cache.mu.Unlock() | ||||
|  | ||||
| 	switch T := T.(type) { | ||||
| 	case *types.Named: | ||||
| 		return cache.lookupNamed(T).value | ||||
|  | ||||
| 	case *types.Pointer: | ||||
| 		if N, ok := T.Elem().(*types.Named); ok { | ||||
| 			return cache.lookupNamed(N).pointer | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// all other types | ||||
| 	// (The map uses pointer equivalence, not type identity.) | ||||
| 	mset := cache.others[T] | ||||
| 	if mset == nil { | ||||
| 		mset = types.NewMethodSet(T) | ||||
| 		if cache.others == nil { | ||||
| 			cache.others = make(map[types.Type]*types.MethodSet) | ||||
| 		} | ||||
| 		cache.others[T] = mset | ||||
| 	} | ||||
| 	return mset | ||||
| } | ||||
|  | ||||
| func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { | ||||
| 	if cache.named == nil { | ||||
| 		cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) | ||||
| 	} | ||||
| 	// Avoid recomputing mset(*T) for each distinct Pointer | ||||
| 	// instance whose underlying type is a named type. | ||||
| 	msets, ok := cache.named[named] | ||||
| 	if !ok { | ||||
| 		msets.value = types.NewMethodSet(named) | ||||
| 		msets.pointer = types.NewMethodSet(types.NewPointer(named)) | ||||
| 		cache.named[named] = msets | ||||
| 	} | ||||
| 	return msets | ||||
| } | ||||
							
								
								
									
										52
									
								
								vendor/golang.org/x/tools/go/types/typeutil/ui.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										52
									
								
								vendor/golang.org/x/tools/go/types/typeutil/ui.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,52 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package typeutil | ||||
|  | ||||
| // This file defines utilities for user interfaces that display types. | ||||
|  | ||||
| import "go/types" | ||||
|  | ||||
| // IntuitiveMethodSet returns the intuitive method set of a type T, | ||||
| // which is the set of methods you can call on an addressable value of | ||||
| // that type. | ||||
| // | ||||
| // The result always contains MethodSet(T), and is exactly MethodSet(T) | ||||
| // for interface types and for pointer-to-concrete types. | ||||
| // For all other concrete types T, the result additionally | ||||
| // contains each method belonging to *T if there is no identically | ||||
| // named method on T itself. | ||||
| // | ||||
| // This corresponds to user intuition about method sets; | ||||
| // this function is intended only for user interfaces. | ||||
| // | ||||
| // The order of the result is as for types.MethodSet(T). | ||||
| // | ||||
| func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { | ||||
| 	isPointerToConcrete := func(T types.Type) bool { | ||||
| 		ptr, ok := T.(*types.Pointer) | ||||
| 		return ok && !types.IsInterface(ptr.Elem()) | ||||
| 	} | ||||
|  | ||||
| 	var result []*types.Selection | ||||
| 	mset := msets.MethodSet(T) | ||||
| 	if types.IsInterface(T) || isPointerToConcrete(T) { | ||||
| 		for i, n := 0, mset.Len(); i < n; i++ { | ||||
| 			result = append(result, mset.At(i)) | ||||
| 		} | ||||
| 	} else { | ||||
| 		// T is some other concrete type. | ||||
| 		// Report methods of T and *T, preferring those of T. | ||||
| 		pmset := msets.MethodSet(types.NewPointer(T)) | ||||
| 		for i, n := 0, pmset.Len(); i < n; i++ { | ||||
| 			meth := pmset.At(i) | ||||
| 			if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { | ||||
| 				meth = m | ||||
| 			} | ||||
| 			result = append(result, meth) | ||||
| 		} | ||||
|  | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
							
								
								
									
										20
									
								
								vendor/honnef.co/go/tools/LICENSE
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								vendor/honnef.co/go/tools/LICENSE
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,20 @@ | ||||
| Copyright (c) 2016 Dominik Honnef | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining | ||||
| a copy of this software and associated documentation files (the | ||||
| "Software"), to deal in the Software without restriction, including | ||||
| without limitation the rights to use, copy, modify, merge, publish, | ||||
| distribute, sublicense, and/or sell copies of the Software, and to | ||||
| permit persons to whom the Software is furnished to do so, subject to | ||||
| the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be | ||||
| included in all copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
| EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
| MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
| NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | ||||
| LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||||
| OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||||
| WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||
							
								
								
									
										226
									
								
								vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										226
									
								
								vendor/honnef.co/go/tools/LICENSE-THIRD-PARTY
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,226 @@ | ||||
| Staticcheck and its related tools make use of third party projects, | ||||
| either by reusing their code, or by statically linking them into | ||||
| resulting binaries. These projects are: | ||||
|  | ||||
| * The Go Programming Language - https://golang.org/ | ||||
|  | ||||
|     Copyright (c) 2009 The Go Authors. All rights reserved. | ||||
|  | ||||
|     Redistribution and use in source and binary forms, with or without | ||||
|     modification, are permitted provided that the following conditions are | ||||
|     met: | ||||
|  | ||||
|        * Redistributions of source code must retain the above copyright | ||||
|     notice, this list of conditions and the following disclaimer. | ||||
|        * Redistributions in binary form must reproduce the above | ||||
|     copyright notice, this list of conditions and the following disclaimer | ||||
|     in the documentation and/or other materials provided with the | ||||
|     distribution. | ||||
|        * Neither the name of Google Inc. nor the names of its | ||||
|     contributors may be used to endorse or promote products derived from | ||||
|     this software without specific prior written permission. | ||||
|  | ||||
|     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
|     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
|     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
|     A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
|     OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
|     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
|     LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
|     DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
|     THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
|     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
|     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  | ||||
|  | ||||
| * github.com/BurntSushi/toml - https://github.com/BurntSushi/toml | ||||
|  | ||||
|     The MIT License (MIT) | ||||
|  | ||||
|     Copyright (c) 2013 TOML authors | ||||
|  | ||||
|     Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
|     of this software and associated documentation files (the "Software"), to deal | ||||
|     in the Software without restriction, including without limitation the rights | ||||
|     to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
|     copies of the Software, and to permit persons to whom the Software is | ||||
|     furnished to do so, subject to the following conditions: | ||||
|  | ||||
|     The above copyright notice and this permission notice shall be included in | ||||
|     all copies or substantial portions of the Software. | ||||
|  | ||||
|     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
|     IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
|     FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
|     AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
|     LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
|     OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||||
|     THE SOFTWARE. | ||||
|  | ||||
|  | ||||
| * github.com/google/renameio - https://github.com/google/renameio | ||||
|  | ||||
|     Copyright 2018 Google Inc. | ||||
|  | ||||
|     Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|     you may not use this file except in compliance with the License. | ||||
|     You may obtain a copy of the License at | ||||
|  | ||||
|          http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|     Unless required by applicable law or agreed to in writing, software | ||||
|     distributed under the License is distributed on an "AS IS" BASIS, | ||||
|     WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|     See the License for the specific language governing permissions and | ||||
|     limitations under the License. | ||||
|  | ||||
|  | ||||
| * github.com/kisielk/gotool – https://github.com/kisielk/gotool | ||||
|  | ||||
|     Copyright (c) 2013 Kamil Kisiel <kamil@kamilkisiel.net> | ||||
|  | ||||
|     Permission is hereby granted, free of charge, to any person obtaining | ||||
|     a copy of this software and associated documentation files (the | ||||
|     "Software"), to deal in the Software without restriction, including | ||||
|     without limitation the rights to use, copy, modify, merge, publish, | ||||
|     distribute, sublicense, and/or sell copies of the Software, and to | ||||
|     permit persons to whom the Software is furnished to do so, subject to | ||||
|     the following conditions: | ||||
|  | ||||
|     The above copyright notice and this permission notice shall be | ||||
|     included in all copies or substantial portions of the Software. | ||||
|  | ||||
|     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||||
|     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||||
|     MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||||
|     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE | ||||
|     LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION | ||||
|     OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION | ||||
|     WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. | ||||
|  | ||||
|     All the files in this distribution are covered under either the MIT | ||||
|     license (see the file LICENSE) except some files mentioned below. | ||||
|  | ||||
|     match.go, match_test.go: | ||||
|  | ||||
|         Copyright (c) 2009 The Go Authors. All rights reserved. | ||||
|  | ||||
|         Redistribution and use in source and binary forms, with or without | ||||
|         modification, are permitted provided that the following conditions are | ||||
|         met: | ||||
|  | ||||
|            * Redistributions of source code must retain the above copyright | ||||
|         notice, this list of conditions and the following disclaimer. | ||||
|            * Redistributions in binary form must reproduce the above | ||||
|         copyright notice, this list of conditions and the following disclaimer | ||||
|         in the documentation and/or other materials provided with the | ||||
|         distribution. | ||||
|            * Neither the name of Google Inc. nor the names of its | ||||
|         contributors may be used to endorse or promote products derived from | ||||
|         this software without specific prior written permission. | ||||
|  | ||||
|         THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
|         "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
|         LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
|         A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
|         OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
|         SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
|         LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
|         DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
|         THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
|         (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
|         OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  | ||||
|  | ||||
| * github.com/rogpeppe/go-internal - https://github.com/rogpeppe/go-internal | ||||
|  | ||||
|     Copyright (c) 2018 The Go Authors. All rights reserved. | ||||
|  | ||||
|     Redistribution and use in source and binary forms, with or without | ||||
|     modification, are permitted provided that the following conditions are | ||||
|     met: | ||||
|  | ||||
|        * Redistributions of source code must retain the above copyright | ||||
|     notice, this list of conditions and the following disclaimer. | ||||
|        * Redistributions in binary form must reproduce the above | ||||
|     copyright notice, this list of conditions and the following disclaimer | ||||
|     in the documentation and/or other materials provided with the | ||||
|     distribution. | ||||
|        * Neither the name of Google Inc. nor the names of its | ||||
|     contributors may be used to endorse or promote products derived from | ||||
|     this software without specific prior written permission. | ||||
|  | ||||
|     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
|     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
|     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
|     A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
|     OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
|     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
|     LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
|     DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
|     THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
|     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
|     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  | ||||
|  | ||||
| * golang.org/x/mod/module - https://github.com/golang/mod | ||||
|  | ||||
|     Copyright (c) 2009 The Go Authors. All rights reserved. | ||||
|  | ||||
|     Redistribution and use in source and binary forms, with or without | ||||
|     modification, are permitted provided that the following conditions are | ||||
|     met: | ||||
|  | ||||
|        * Redistributions of source code must retain the above copyright | ||||
|     notice, this list of conditions and the following disclaimer. | ||||
|        * Redistributions in binary form must reproduce the above | ||||
|     copyright notice, this list of conditions and the following disclaimer | ||||
|     in the documentation and/or other materials provided with the | ||||
|     distribution. | ||||
|        * Neither the name of Google Inc. nor the names of its | ||||
|     contributors may be used to endorse or promote products derived from | ||||
|     this software without specific prior written permission. | ||||
|  | ||||
|     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
|     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
|     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
|     A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
|     OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
|     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
|     LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
|     DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
|     THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
|     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
|     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  | ||||
|  | ||||
| * golang.org/x/tools/go/analysis - https://github.com/golang/tools | ||||
|  | ||||
|     Copyright (c) 2009 The Go Authors. All rights reserved. | ||||
|  | ||||
|     Redistribution and use in source and binary forms, with or without | ||||
|     modification, are permitted provided that the following conditions are | ||||
|     met: | ||||
|  | ||||
|        * Redistributions of source code must retain the above copyright | ||||
|     notice, this list of conditions and the following disclaimer. | ||||
|        * Redistributions in binary form must reproduce the above | ||||
|     copyright notice, this list of conditions and the following disclaimer | ||||
|     in the documentation and/or other materials provided with the | ||||
|     distribution. | ||||
|        * Neither the name of Google Inc. nor the names of its | ||||
|     contributors may be used to endorse or promote products derived from | ||||
|     this software without specific prior written permission. | ||||
|  | ||||
|     THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
|     "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
|     LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
|     A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
|     OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
|     SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
|     LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
|     DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
|     THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
|     (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
|     OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
|  | ||||
							
								
								
									
										23
									
								
								vendor/honnef.co/go/tools/arg/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								vendor/honnef.co/go/tools/arg/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,23 @@ | ||||
# Bazel rules for the vendored honnef.co/go/tools/arg package.
# NOTE(review): the "automanaged" tags suggest this file is tool-generated
# (e.g. by hack/update-bazel); confirm before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["arg.go"],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/arg",
    importpath = "honnef.co/go/tools/arg",
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										48
									
								
								vendor/honnef.co/go/tools/arg/arg.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										48
									
								
								vendor/honnef.co/go/tools/arg/arg.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,48 @@ | ||||
| package arg | ||||
|  | ||||
// args maps a fully qualified "function.parameter" name to the
// zero-based position of that parameter in the function's argument
// list. Variadic elements use an "[i]" suffix (e.g. "fmt.Sprintf.a[0]").
var args = map[string]int{
	"(*encoding/json.Decoder).Decode.v":    0,
	"(*encoding/json.Encoder).Encode.v":    0,
	"(*encoding/xml.Decoder).Decode.v":     0,
	"(*encoding/xml.Encoder).Encode.v":     0,
	"(*sync.Pool).Put.x":                   0,
	"(*text/template.Template).Parse.text": 0,
	"(io.Seeker).Seek.offset":              0,
	"(time.Time).Sub.u":                    0,
	"append.elems":                         1,
	"append.slice":                         0,
	"bytes.Equal.a":                        0,
	"bytes.Equal.b":                        1,
	"encoding/binary.Write.data":           2,
	"errors.New.text":                      0,
	"fmt.Fprintf.format":                   1,
	"fmt.Printf.format":                    0,
	"fmt.Sprintf.a[0]":                     1,
	"fmt.Sprintf.format":                   0,
	"json.Marshal.v":                       0,
	"json.Unmarshal.v":                     1,
	"len.v":                                0,
	"make.size[0]":                         1,
	"make.size[1]":                         2,
	"make.t":                               0,
	"net/url.Parse.rawurl":                 0,
	"os.OpenFile.flag":                     1,
	"os/exec.Command.name":                 0,
	"os/signal.Notify.c":                   0,
	"regexp.Compile.expr":                  0,
	"runtime.SetFinalizer.finalizer":       1,
	"runtime.SetFinalizer.obj":             0,
	"sort.Sort.data":                       0,
	"time.Parse.layout":                    0,
	"time.Sleep.d":                         0,
	"xml.Marshal.v":                        0,
	"xml.Unmarshal.v":                      1,
}

// Arg returns the positional index of the named argument. It panics if
// the name has not been registered in the args table above, since a
// lookup miss indicates a programming error in the caller.
func Arg(name string) int {
	idx, known := args[name]
	if !known {
		panic("unknown argument " + name)
	}
	return idx
}
							
								
								
									
										39
									
								
								vendor/honnef.co/go/tools/cmd/staticcheck/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										39
									
								
								vendor/honnef.co/go/tools/cmd/staticcheck/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,39 @@ | ||||
# Bazel rules for the vendored staticcheck command: a go_library for the
# sources plus a manual-tagged go_binary so the tool is only built on demand.
# NOTE(review): the "automanaged" tags suggest this file is tool-generated;
# confirm before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["staticcheck.go"],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/cmd/staticcheck",
    importpath = "honnef.co/go/tools/cmd/staticcheck",
    visibility = ["//visibility:private"],
    deps = [
        "//vendor/golang.org/x/tools/go/analysis:go_default_library",
        "//vendor/honnef.co/go/tools/lint:go_default_library",
        "//vendor/honnef.co/go/tools/lint/lintutil:go_default_library",
        "//vendor/honnef.co/go/tools/simple:go_default_library",
        "//vendor/honnef.co/go/tools/staticcheck:go_default_library",
        "//vendor/honnef.co/go/tools/stylecheck:go_default_library",
        "//vendor/honnef.co/go/tools/unused:go_default_library",
    ],
)

go_binary(
    name = "staticcheck",
    embed = [":go_default_library"],
    # "manual" keeps the binary out of wildcard builds like //...
    tags = ["manual"],
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										15
									
								
								vendor/honnef.co/go/tools/cmd/staticcheck/README.md
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										15
									
								
								vendor/honnef.co/go/tools/cmd/staticcheck/README.md
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,15 @@ | ||||
| # staticcheck | ||||
|  | ||||
| _staticcheck_ offers extensive analysis of Go code, covering a myriad | ||||
| of categories. It will detect bugs, suggest code simplifications, | ||||
| point out dead code, and more. | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| See [the main README](https://github.com/dominikh/go-tools#installation) for installation instructions. | ||||
|  | ||||
| ## Documentation | ||||
|  | ||||
| Detailed documentation can be found on | ||||
| [staticcheck.io](https://staticcheck.io/docs/). | ||||
|  | ||||
							
								
								
									
										44
									
								
								vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										44
									
								
								vendor/honnef.co/go/tools/cmd/staticcheck/staticcheck.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,44 @@ | ||||
| // staticcheck analyses Go code and makes it better. | ||||
| package main // import "honnef.co/go/tools/cmd/staticcheck" | ||||
|  | ||||
| import ( | ||||
| 	"log" | ||||
| 	"os" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| 	"honnef.co/go/tools/lint" | ||||
| 	"honnef.co/go/tools/lint/lintutil" | ||||
| 	"honnef.co/go/tools/simple" | ||||
| 	"honnef.co/go/tools/staticcheck" | ||||
| 	"honnef.co/go/tools/stylecheck" | ||||
| 	"honnef.co/go/tools/unused" | ||||
| ) | ||||
|  | ||||
| func main() { | ||||
| 	fs := lintutil.FlagSet("staticcheck") | ||||
| 	wholeProgram := fs.Bool("unused.whole-program", false, "Run unused in whole program mode") | ||||
| 	debug := fs.String("debug.unused-graph", "", "Write unused's object graph to `file`") | ||||
| 	fs.Parse(os.Args[1:]) | ||||
|  | ||||
| 	var cs []*analysis.Analyzer | ||||
| 	for _, v := range simple.Analyzers { | ||||
| 		cs = append(cs, v) | ||||
| 	} | ||||
| 	for _, v := range staticcheck.Analyzers { | ||||
| 		cs = append(cs, v) | ||||
| 	} | ||||
| 	for _, v := range stylecheck.Analyzers { | ||||
| 		cs = append(cs, v) | ||||
| 	} | ||||
|  | ||||
| 	u := unused.NewChecker(*wholeProgram) | ||||
| 	if *debug != "" { | ||||
| 		f, err := os.OpenFile(*debug, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0666) | ||||
| 		if err != nil { | ||||
| 			log.Fatal(err) | ||||
| 		} | ||||
| 		u.Debug = f | ||||
| 	} | ||||
| 	cums := []lint.CumulativeChecker{u} | ||||
| 	lintutil.ProcessFlagSet(cs, cums, fs) | ||||
| } | ||||
							
								
								
									
										27
									
								
								vendor/honnef.co/go/tools/config/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/honnef.co/go/tools/config/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
# Bazel rules for the vendored honnef.co/go/tools/config package, which
# depends on the TOML parser and the go/analysis framework.
# NOTE(review): the "automanaged" tags suggest this file is tool-generated;
# confirm before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["config.go"],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/config",
    importpath = "honnef.co/go/tools/config",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/github.com/BurntSushi/toml:go_default_library",
        "//vendor/golang.org/x/tools/go/analysis:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										224
									
								
								vendor/honnef.co/go/tools/config/config.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										224
									
								
								vendor/honnef.co/go/tools/config/config.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,224 @@ | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/BurntSushi/toml" | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| ) | ||||
|  | ||||
// Analyzer loads the staticcheck configuration that applies to the
// package being analyzed. Its result (*Config) is consumed by the
// individual checks via For. It anchors the config search at the
// directory of the first package file that is not in the build cache.
var Analyzer = &analysis.Analyzer{
	Name: "config",
	Doc:  "loads configuration for the current package tree",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		if len(pass.Files) == 0 {
			// No files to anchor the search at; hand out a copy of
			// the defaults.
			cfg := DefaultConfig
			return &cfg, nil
		}
		cache, err := os.UserCacheDir()
		if err != nil {
			// Unknown cache dir: skip the cache filter below.
			cache = ""
		}
		// Pick the first file that lives in the source tree rather
		// than the build cache; its directory anchors the config walk.
		var path string
		for _, f := range pass.Files {
			p := pass.Fset.PositionFor(f.Pos(), true).Filename
			// FIXME(dh): using strings.HasPrefix isn't technically
			// correct, but it should be good enough for now.
			if cache != "" && strings.HasPrefix(p, cache) {
				// File in the build cache of the standard Go build system
				continue
			}
			path = p
			break
		}

		if path == "" {
			// The package only consists of generated files.
			cfg := DefaultConfig
			return &cfg, nil
		}

		dir := filepath.Dir(path)
		cfg, err := Load(dir)
		if err != nil {
			return nil, fmt.Errorf("error loading staticcheck.conf: %s", err)
		}
		return &cfg, nil
	},
	// Configuration must be available even when the package fails to
	// type-check.
	RunDespiteErrors: true,
	ResultType:       reflect.TypeOf((*Config)(nil)),
}
|  | ||||
| func For(pass *analysis.Pass) *Config { | ||||
| 	return pass.ResultOf[Analyzer].(*Config) | ||||
| } | ||||
|  | ||||
// mergeLists overlays list b on top of inherited list a. Every
// occurrence of the marker "inherit" in b expands to the full contents
// of a; all other elements of b are copied through unchanged.
func mergeLists(a, b []string) []string {
	merged := make([]string, 0, len(a)+len(b))
	for _, item := range b {
		if item != "inherit" {
			merged = append(merged, item)
			continue
		}
		merged = append(merged, a...)
	}
	return merged
}
|  | ||||
// normalizeList collapses runs of identical consecutive elements into a
// single element and asserts that no unresolved "inherit" marker
// survived merging. It returns a fresh slice when deduplication ran.
func normalizeList(list []string) []string {
	if len(list) > 1 {
		deduped := make([]string, 0, len(list))
		for i, el := range list {
			// Keep an element only when it differs from its predecessor.
			if i == 0 || el != list[i-1] {
				deduped = append(deduped, el)
			}
		}
		list = deduped
	}

	for _, el := range list {
		if el == "inherit" {
			// This should never happen, because the default config
			// should not use "inherit"
			panic(`unresolved "inherit"`)
		}
	}

	return list
}
|  | ||||
| func (cfg Config) Merge(ocfg Config) Config { | ||||
| 	if ocfg.Checks != nil { | ||||
| 		cfg.Checks = mergeLists(cfg.Checks, ocfg.Checks) | ||||
| 	} | ||||
| 	if ocfg.Initialisms != nil { | ||||
| 		cfg.Initialisms = mergeLists(cfg.Initialisms, ocfg.Initialisms) | ||||
| 	} | ||||
| 	if ocfg.DotImportWhitelist != nil { | ||||
| 		cfg.DotImportWhitelist = mergeLists(cfg.DotImportWhitelist, ocfg.DotImportWhitelist) | ||||
| 	} | ||||
| 	if ocfg.HTTPStatusCodeWhitelist != nil { | ||||
| 		cfg.HTTPStatusCodeWhitelist = mergeLists(cfg.HTTPStatusCodeWhitelist, ocfg.HTTPStatusCodeWhitelist) | ||||
| 	} | ||||
| 	return cfg | ||||
| } | ||||
|  | ||||
// Config holds the user-tunable settings read from staticcheck.conf
// files. A nil slice means "not set in this file"; Merge relies on that
// to distinguish unset lists from explicitly empty ones.
type Config struct {
	// TODO(dh): this implementation makes it impossible for external
	// clients to add their own checkers with configuration. At the
	// moment, we don't really care about that; we don't encourage
	// that people use this package. In the future, we may. The
	// obvious solution would be using map[string]interface{}, but
	// that's obviously subpar.

	// Checks selects which checks run; see DefaultConfig for the
	// "all" plus "-XXXX" exclusion form.
	Checks []string `toml:"checks"`
	// Initialisms lists words treated as initialisms (presumably by
	// the naming checks — confirm against the consumers).
	Initialisms []string `toml:"initialisms"`
	// DotImportWhitelist lists packages allowed to be dot-imported.
	DotImportWhitelist []string `toml:"dot_import_whitelist"`
	// HTTPStatusCodeWhitelist lists numeric status codes (as strings)
	// that may appear as bare literals.
	HTTPStatusCodeWhitelist []string `toml:"http_status_code_whitelist"`
}
|  | ||||
| func (c Config) String() string { | ||||
| 	buf := &bytes.Buffer{} | ||||
|  | ||||
| 	fmt.Fprintf(buf, "Checks: %#v\n", c.Checks) | ||||
| 	fmt.Fprintf(buf, "Initialisms: %#v\n", c.Initialisms) | ||||
| 	fmt.Fprintf(buf, "DotImportWhitelist: %#v\n", c.DotImportWhitelist) | ||||
| 	fmt.Fprintf(buf, "HTTPStatusCodeWhitelist: %#v", c.HTTPStatusCodeWhitelist) | ||||
|  | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
// DefaultConfig supplies the settings used when no staticcheck.conf
// overrides them. parseConfigs always places it first in the chain, so
// on-disk configs are merged on top of these values.
var DefaultConfig = Config{
	Checks: []string{"all", "-ST1000", "-ST1003", "-ST1016"},
	Initialisms: []string{
		"ACL", "API", "ASCII", "CPU", "CSS", "DNS",
		"EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
		"IP", "JSON", "QPS", "RAM", "RPC", "SLA",
		"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
		"UDP", "UI", "GID", "UID", "UUID", "URI",
		"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
		"XSS", "SIP", "RTP",
	},
	DotImportWhitelist:      []string{},
	HTTPStatusCodeWhitelist: []string{"200", "400", "404", "500"},
}
|  | ||||
| const configName = "staticcheck.conf" | ||||
|  | ||||
| func parseConfigs(dir string) ([]Config, error) { | ||||
| 	var out []Config | ||||
|  | ||||
| 	// TODO(dh): consider stopping at the GOPATH/module boundary | ||||
| 	for dir != "" { | ||||
| 		f, err := os.Open(filepath.Join(dir, configName)) | ||||
| 		if os.IsNotExist(err) { | ||||
| 			ndir := filepath.Dir(dir) | ||||
| 			if ndir == dir { | ||||
| 				break | ||||
| 			} | ||||
| 			dir = ndir | ||||
| 			continue | ||||
| 		} | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		var cfg Config | ||||
| 		_, err = toml.DecodeReader(f, &cfg) | ||||
| 		f.Close() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		out = append(out, cfg) | ||||
| 		ndir := filepath.Dir(dir) | ||||
| 		if ndir == dir { | ||||
| 			break | ||||
| 		} | ||||
| 		dir = ndir | ||||
| 	} | ||||
| 	out = append(out, DefaultConfig) | ||||
| 	if len(out) < 2 { | ||||
| 		return out, nil | ||||
| 	} | ||||
| 	for i := 0; i < len(out)/2; i++ { | ||||
| 		out[i], out[len(out)-1-i] = out[len(out)-1-i], out[i] | ||||
| 	} | ||||
| 	return out, nil | ||||
| } | ||||
|  | ||||
| func mergeConfigs(confs []Config) Config { | ||||
| 	if len(confs) == 0 { | ||||
| 		// This shouldn't happen because we always have at least a | ||||
| 		// default config. | ||||
| 		panic("trying to merge zero configs") | ||||
| 	} | ||||
| 	if len(confs) == 1 { | ||||
| 		return confs[0] | ||||
| 	} | ||||
| 	conf := confs[0] | ||||
| 	for _, oconf := range confs[1:] { | ||||
| 		conf = conf.Merge(oconf) | ||||
| 	} | ||||
| 	return conf | ||||
| } | ||||
|  | ||||
| func Load(dir string) (Config, error) { | ||||
| 	confs, err := parseConfigs(dir) | ||||
| 	if err != nil { | ||||
| 		return Config{}, err | ||||
| 	} | ||||
| 	conf := mergeConfigs(confs) | ||||
|  | ||||
| 	conf.Checks = normalizeList(conf.Checks) | ||||
| 	conf.Initialisms = normalizeList(conf.Initialisms) | ||||
| 	conf.DotImportWhitelist = normalizeList(conf.DotImportWhitelist) | ||||
| 	conf.HTTPStatusCodeWhitelist = normalizeList(conf.HTTPStatusCodeWhitelist) | ||||
|  | ||||
| 	return conf, nil | ||||
| } | ||||
							
								
								
									
										10
									
								
								vendor/honnef.co/go/tools/config/example.conf
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								vendor/honnef.co/go/tools/config/example.conf
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | ||||
# Example staticcheck.conf listing every supported setting.
# Lists may contain the special value "inherit" to splice in the value
# inherited from configs higher up the directory tree.
checks = ["all", "-ST1003", "-ST1014"]
initialisms = ["ACL", "API", "ASCII", "CPU", "CSS", "DNS",
	"EOF", "GUID", "HTML", "HTTP", "HTTPS", "ID",
	"IP", "JSON", "QPS", "RAM", "RPC", "SLA",
	"SMTP", "SQL", "SSH", "TCP", "TLS", "TTL",
	"UDP", "UI", "GID", "UID", "UUID", "URI",
	"URL", "UTF8", "VM", "XML", "XMPP", "XSRF",
	"XSS", "SIP", "RTP"]
dot_import_whitelist = []
http_status_code_whitelist = ["200", "400", "404", "500"]
							
								
								
									
										23
									
								
								vendor/honnef.co/go/tools/deprecated/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								vendor/honnef.co/go/tools/deprecated/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,23 @@ | ||||
# Bazel rules for the vendored honnef.co/go/tools/deprecated package.
# NOTE(review): the "automanaged" tags suggest this file is tool-generated;
# confirm before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["stdlib.go"],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/deprecated",
    importpath = "honnef.co/go/tools/deprecated",
    visibility = ["//visibility:public"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										112
									
								
								vendor/honnef.co/go/tools/deprecated/stdlib.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										112
									
								
								vendor/honnef.co/go/tools/deprecated/stdlib.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,112 @@ | ||||
| package deprecated | ||||
|  | ||||
// Deprecation records when a standard-library identifier was deprecated
// and since which Go release an alternative has been available. Both
// fields are Go minor versions (e.g. 7 means Go 1.7); 0 marks "since
// the beginning" or "no alternative".
type Deprecation struct {
	DeprecatedSince           int
	AlternativeAvailableSince int
}

// Stdlib maps fully qualified standard-library identifiers — package
// path plus name, with methods written as "(pkg.Type).Method" — to
// their deprecation information.
//
// BUG FIX: the entries for InterfaceAnnounceMessage and
// InterfaceMulticastAddrMessage were missing their "syscall." package
// prefix, so they could never match the fully qualified names used by
// every other key in this table (compare "syscall.RouteRIB" et al. in
// the same group).
var Stdlib = map[string]Deprecation{
	"image/jpeg.Reader": {4, 0},
	// FIXME(dh): AllowBinary isn't being detected as deprecated
	// because the comment has a newline right after "Deprecated:"
	"go/build.AllowBinary":                        {7, 7},
	"(archive/zip.FileHeader).CompressedSize":     {1, 1},
	"(archive/zip.FileHeader).UncompressedSize":   {1, 1},
	"(archive/zip.FileHeader).ModifiedTime":       {10, 10},
	"(archive/zip.FileHeader).ModifiedDate":       {10, 10},
	"(*archive/zip.FileHeader).ModTime":           {10, 10},
	"(*archive/zip.FileHeader).SetModTime":        {10, 10},
	"(go/doc.Package).Bugs":                       {1, 1},
	"os.SEEK_SET":                                 {7, 7},
	"os.SEEK_CUR":                                 {7, 7},
	"os.SEEK_END":                                 {7, 7},
	"(net.Dialer).Cancel":                         {7, 7},
	"runtime.CPUProfile":                          {9, 0},
	"compress/flate.ReadError":                    {6, 6},
	"compress/flate.WriteError":                   {6, 6},
	"path/filepath.HasPrefix":                     {0, 0},
	"(net/http.Transport).Dial":                   {7, 7},
	"(*net/http.Transport).CancelRequest":         {6, 5},
	"net/http.ErrWriteAfterFlush":                 {7, 0},
	"net/http.ErrHeaderTooLong":                   {8, 0},
	"net/http.ErrShortBody":                       {8, 0},
	"net/http.ErrMissingContentLength":            {8, 0},
	"net/http/httputil.ErrPersistEOF":             {0, 0},
	"net/http/httputil.ErrClosed":                 {0, 0},
	"net/http/httputil.ErrPipeline":               {0, 0},
	"net/http/httputil.ServerConn":                {0, 0},
	"net/http/httputil.NewServerConn":             {0, 0},
	"net/http/httputil.ClientConn":                {0, 0},
	"net/http/httputil.NewClientConn":             {0, 0},
	"net/http/httputil.NewProxyClientConn":        {0, 0},
	"(net/http.Request).Cancel":                   {7, 7},
	"(text/template/parse.PipeNode).Line":         {1, 1},
	"(text/template/parse.ActionNode).Line":       {1, 1},
	"(text/template/parse.BranchNode).Line":       {1, 1},
	"(text/template/parse.TemplateNode).Line":     {1, 1},
	"database/sql/driver.ColumnConverter":         {9, 9},
	"database/sql/driver.Execer":                  {8, 8},
	"database/sql/driver.Queryer":                 {8, 8},
	"(database/sql/driver.Conn).Begin":            {8, 8},
	"(database/sql/driver.Stmt).Exec":             {8, 8},
	"(database/sql/driver.Stmt).Query":            {8, 8},
	"syscall.StringByteSlice":                     {1, 1},
	"syscall.StringBytePtr":                       {1, 1},
	"syscall.StringSlicePtr":                      {1, 1},
	"syscall.StringToUTF16":                       {1, 1},
	"syscall.StringToUTF16Ptr":                    {1, 1},
	"(*regexp.Regexp).Copy":                       {12, 12},
	"(archive/tar.Header).Xattrs":                 {10, 10},
	"archive/tar.TypeRegA":                        {11, 1},
	"go/types.NewInterface":                       {11, 11},
	"(*go/types.Interface).Embedded":              {11, 11},
	"go/importer.For":                             {12, 12},
	"encoding/json.InvalidUTF8Error":              {2, 2},
	"encoding/json.UnmarshalFieldError":           {2, 2},
	"encoding/csv.ErrTrailingComma":               {2, 2},
	"(encoding/csv.Reader).TrailingComma":         {2, 2},
	"(net.Dialer).DualStack":                      {12, 12},
	"net/http.ErrUnexpectedTrailer":               {12, 12},
	"net/http.CloseNotifier":                      {11, 7},
	"net/http.ProtocolError":                      {8, 8},
	"(crypto/x509.CertificateRequest).Attributes": {5, 3},
	// This function has no alternative, but also no purpose.
	"(*crypto/rc4.Cipher).Reset":                     {12, 0},
	"(net/http/httptest.ResponseRecorder).HeaderMap": {11, 7},

	// All of these have been deprecated in favour of external libraries
	"syscall.AttachLsf":                     {7, 0},
	"syscall.DetachLsf":                     {7, 0},
	"syscall.LsfSocket":                     {7, 0},
	"syscall.SetLsfPromisc":                 {7, 0},
	"syscall.LsfJump":                       {7, 0},
	"syscall.LsfStmt":                       {7, 0},
	"syscall.BpfStmt":                       {7, 0},
	"syscall.BpfJump":                       {7, 0},
	"syscall.BpfBuflen":                     {7, 0},
	"syscall.SetBpfBuflen":                  {7, 0},
	"syscall.BpfDatalink":                   {7, 0},
	"syscall.SetBpfDatalink":                {7, 0},
	"syscall.SetBpfPromisc":                 {7, 0},
	"syscall.FlushBpf":                      {7, 0},
	"syscall.BpfInterface":                  {7, 0},
	"syscall.SetBpfInterface":               {7, 0},
	"syscall.BpfTimeout":                    {7, 0},
	"syscall.SetBpfTimeout":                 {7, 0},
	"syscall.BpfStats":                      {7, 0},
	"syscall.SetBpfImmediate":               {7, 0},
	"syscall.SetBpf":                        {7, 0},
	"syscall.CheckBpfVersion":               {7, 0},
	"syscall.BpfHeadercmpl":                 {7, 0},
	"syscall.SetBpfHeadercmpl":              {7, 0},
	"syscall.RouteRIB":                      {8, 0},
	"syscall.RoutingMessage":                {8, 0},
	"syscall.RouteMessage":                  {8, 0},
	"syscall.InterfaceMessage":              {8, 0},
	"syscall.InterfaceAddrMessage":          {8, 0},
	"syscall.ParseRoutingMessage":           {8, 0},
	"syscall.ParseRoutingSockaddr":          {8, 0},
	"syscall.InterfaceAnnounceMessage":      {7, 0},
	"syscall.InterfaceMulticastAddrMessage": {7, 0},
	"syscall.FormatMessage":                 {5, 0},
}
							
								
								
									
										34
									
								
								vendor/honnef.co/go/tools/facts/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										34
									
								
								vendor/honnef.co/go/tools/facts/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,34 @@ | ||||
# Bazel rules for the vendored honnef.co/go/tools/facts package.
# NOTE(review): the "automanaged" tags suggest this file is tool-generated;
# confirm before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "deprecated.go",
        "generated.go",
        "purity.go",
        "token.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/facts",
    importpath = "honnef.co/go/tools/facts",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/golang.org/x/tools/go/analysis:go_default_library",
        "//vendor/honnef.co/go/tools/functions:go_default_library",
        "//vendor/honnef.co/go/tools/internal/passes/buildssa:go_default_library",
        "//vendor/honnef.co/go/tools/ssa:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										144
									
								
								vendor/honnef.co/go/tools/facts/deprecated.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										144
									
								
								vendor/honnef.co/go/tools/facts/deprecated.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,144 @@ | ||||
| package facts | ||||
|  | ||||
| import ( | ||||
| 	"go/ast" | ||||
| 	"go/token" | ||||
| 	"go/types" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| ) | ||||
|  | ||||
// IsDeprecated is an analysis fact recording that an object or
// package has been marked deprecated; Msg carries the deprecation
// message (typically naming the suggested replacement).
type IsDeprecated struct{ Msg string }

// AFact marks IsDeprecated as an analysis.Fact.
func (*IsDeprecated) AFact() {}

// String renders the fact for diagnostic output.
func (d *IsDeprecated) String() string {
	rendered := "Deprecated: " + d.Msg
	return rendered
}
|  | ||||
// DeprecatedResult is the result type of the Deprecated analyzer. It
// aggregates the IsDeprecated facts seen by a pass, keyed by the
// deprecated object or package.
type DeprecatedResult struct {
	Objects  map[types.Object]*IsDeprecated
	Packages map[*types.Package]*IsDeprecated
}
|  | ||||
// Deprecated is an analyzer that exports IsDeprecated facts for
// objects and packages whose doc comments end in a "Deprecated: "
// paragraph, collecting them into a DeprecatedResult.
var Deprecated = &analysis.Analyzer{
	Name:       "fact_deprecated",
	Doc:        "Mark deprecated objects",
	Run:        deprecated,
	FactTypes:  []analysis.Fact{(*IsDeprecated)(nil)},
	ResultType: reflect.TypeOf(DeprecatedResult{}),
}
|  | ||||
// deprecated implements the Deprecated analyzer. It scans doc
// comments for trailing "Deprecated: " paragraphs, exports
// IsDeprecated facts for the documented objects (and for the package
// itself), and returns a DeprecatedResult aggregating all facts
// visible to this pass, including imported ones.
func deprecated(pass *analysis.Pass) (interface{}, error) {
	var names []*ast.Ident

	// extractDeprecatedMessage returns the message of the first
	// comment group whose final paragraph starts with "Deprecated: ",
	// with newlines flattened to spaces; "" if none carries one.
	extractDeprecatedMessage := func(docs []*ast.CommentGroup) string {
		for _, doc := range docs {
			if doc == nil {
				continue
			}
			parts := strings.Split(doc.Text(), "\n\n")
			last := parts[len(parts)-1]
			if !strings.HasPrefix(last, "Deprecated: ") {
				continue
			}
			alt := last[len("Deprecated: "):]
			alt = strings.Replace(alt, "\n", " ", -1)
			return alt
		}
		return ""
	}
	// doDocs exports an IsDeprecated fact for each named object when
	// the accompanying docs carry a deprecation message.
	doDocs := func(names []*ast.Ident, docs []*ast.CommentGroup) {
		alt := extractDeprecatedMessage(docs)
		if alt == "" {
			return
		}

		for _, name := range names {
			obj := pass.TypesInfo.ObjectOf(name)
			pass.ExportObjectFact(obj, &IsDeprecated{alt})
		}
	}

	// Package-level deprecation: consider the package doc comments of
	// all files in the package.
	var docs []*ast.CommentGroup
	for _, f := range pass.Files {
		docs = append(docs, f.Doc)
	}
	if alt := extractDeprecatedMessage(docs); alt != "" {
		// Don't mark package syscall as deprecated, even though
		// it is. A lot of people still use it for simple
		// constants like SIGKILL, and I am not comfortable
		// telling them to use x/sys for that.
		if pass.Pkg.Path() != "syscall" {
			pass.ExportPackageFact(&IsDeprecated{alt})
		}
	}

	// Reuse docs while walking declarations; names and docs are
	// accumulated by the switch below and flushed via doDocs.
	docs = docs[:0]
	for _, f := range pass.Files {
		fn := func(node ast.Node) bool {
			if node == nil {
				return true
			}
			var ret bool
			switch node := node.(type) {
			case *ast.GenDecl:
				switch node.Tok {
				case token.TYPE, token.CONST, token.VAR:
					// Keep the GenDecl doc so it also applies to the
					// specs inside it; recurse into the specs.
					docs = append(docs, node.Doc)
					return true
				default:
					// e.g. import declarations — nothing to mark.
					return false
				}
			case *ast.FuncDecl:
				docs = append(docs, node.Doc)
				names = []*ast.Ident{node.Name}
				ret = false
			case *ast.TypeSpec:
				docs = append(docs, node.Doc)
				names = []*ast.Ident{node.Name}
				// Recurse so struct/interface members are visited.
				ret = true
			case *ast.ValueSpec:
				docs = append(docs, node.Doc)
				names = node.Names
				ret = false
			case *ast.File:
				return true
			case *ast.StructType:
				// Fields carry their own doc comments, independent of
				// the accumulated declaration state.
				for _, field := range node.Fields.List {
					doDocs(field.Names, []*ast.CommentGroup{field.Doc})
				}
				return false
			case *ast.InterfaceType:
				for _, field := range node.Methods.List {
					doDocs(field.Names, []*ast.CommentGroup{field.Doc})
				}
				return false
			default:
				return false
			}
			if len(names) == 0 || len(docs) == 0 {
				return ret
			}
			doDocs(names, docs)

			// Reset accumulated state for the next declaration.
			docs = docs[:0]
			names = nil
			return ret
		}
		ast.Inspect(f, fn)
	}

	// Gather every exported fact (local and imported) into the result.
	out := DeprecatedResult{
		Objects:  map[types.Object]*IsDeprecated{},
		Packages: map[*types.Package]*IsDeprecated{},
	}

	for _, fact := range pass.AllObjectFacts() {
		out.Objects[fact.Object] = fact.Fact.(*IsDeprecated)
	}
	for _, fact := range pass.AllPackageFacts() {
		out.Packages[fact.Package] = fact.Fact.(*IsDeprecated)
	}

	return out, nil
}
							
								
								
									
										86
									
								
								vendor/honnef.co/go/tools/facts/generated.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										86
									
								
								vendor/honnef.co/go/tools/facts/generated.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,86 @@ | ||||
| package facts | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| ) | ||||
|  | ||||
// Generator identifies the tool that produced a generated file.
type Generator int

// A list of known generators we can detect
const (
	Unknown Generator = iota
	Goyacc
	Cgo
	Stringer
)
|  | ||||
var (
	// used by cgo before Go 1.11
	oldCgo = []byte("// Created by cgo - DO NOT EDIT")
	// prefix/suffix bracket the conventional generated-code marker
	// line ("// Code generated ... DO NOT EDIT.").
	prefix = []byte("// Code generated ")
	suffix = []byte(" DO NOT EDIT.")
	// Line terminators stripped before matching.
	nl   = []byte("\n")
	crnl = []byte("\r\n")
)
|  | ||||
// isGenerated reports whether the file at path carries a
// code-generation marker and, if so, which known generator produced
// it. It scans line by line for either the conventional
// "// Code generated ... DO NOT EDIT." comment or the pre-Go-1.11
// cgo header. Open/read errors are reported as "not generated".
func isGenerated(path string) (Generator, bool) {
	f, err := os.Open(path)
	if err != nil {
		return 0, false
	}
	defer f.Close()
	br := bufio.NewReader(f)
	for {
		s, err := br.ReadBytes('\n')
		if err != nil && err != io.EOF {
			return 0, false
		}
		// Strip the line terminator (CRLF or plain LF).
		s = bytes.TrimSuffix(s, crnl)
		s = bytes.TrimSuffix(s, nl)
		if bytes.HasPrefix(s, prefix) && bytes.HasSuffix(s, suffix) {
			// The text between prefix and suffix names the generator.
			text := string(s[len(prefix) : len(s)-len(suffix)])
			switch text {
			case "by goyacc.":
				return Goyacc, true
			case "by cmd/cgo;":
				return Cgo, true
			}
			if strings.HasPrefix(text, `by "stringer `) {
				return Stringer, true
			}
			// Marker present but the generator is not one we know.
			return Unknown, true
		}
		if bytes.Equal(s, oldCgo) {
			return Cgo, true
		}
		if err == io.EOF {
			break
		}
	}
	return 0, false
}
|  | ||||
// Generated is an analyzer whose result maps the on-disk file names
// of generated files in the package to the Generator that produced
// them; non-generated files are absent from the map.
var Generated = &analysis.Analyzer{
	Name: "isgenerated",
	Doc:  "annotate file names that have been code generated",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		m := map[string]Generator{}
		for _, f := range pass.Files {
			// PositionFor with adjusted=false ignores //line
			// directives, so we inspect the actual file on disk.
			path := pass.Fset.PositionFor(f.Pos(), false).Filename
			g, ok := isGenerated(path)
			if ok {
				m[path] = g
			}
		}
		return m, nil
	},
	RunDespiteErrors: true,
	ResultType:       reflect.TypeOf(map[string]Generator{}),
}
							
								
								
									
										175
									
								
								vendor/honnef.co/go/tools/facts/purity.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										175
									
								
								vendor/honnef.co/go/tools/facts/purity.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,175 @@ | ||||
| package facts | ||||
|  | ||||
| import ( | ||||
| 	"go/token" | ||||
| 	"go/types" | ||||
| 	"reflect" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| 	"honnef.co/go/tools/functions" | ||||
| 	"honnef.co/go/tools/internal/passes/buildssa" | ||||
| 	"honnef.co/go/tools/ssa" | ||||
| ) | ||||
|  | ||||
// IsPure is an analysis fact recording that a function is free of
// observable side effects.
type IsPure struct{}

// AFact marks IsPure as an analysis.Fact.
func (*IsPure) AFact() {}

// String renders the fact for diagnostic output.
func (*IsPure) String() string {
	const rendered = "is pure"
	return rendered
}
|  | ||||
// PurityResult is the result type of the Purity analyzer: the set of
// functions proven pure, keyed by their types.Func.
type PurityResult map[*types.Func]*IsPure

// Purity is an analyzer that exports IsPure facts for functions it
// can conservatively prove to have no side effects, based on their
// SSA form.
var Purity = &analysis.Analyzer{
	Name:       "fact_purity",
	Doc:        "Mark pure functions",
	Run:        purity,
	Requires:   []*analysis.Analyzer{buildssa.Analyzer},
	FactTypes:  []analysis.Fact{(*IsPure)(nil)},
	ResultType: reflect.TypeOf(PurityResult{}),
}
|  | ||||
// pureStdlib lists functions from the standard library (keyed by
// their full name, as reported by types.Func.FullName) that are
// treated as pure by fiat, without analysis.
var pureStdlib = map[string]struct{}{
	"errors.New":                      {},
	"fmt.Errorf":                      {},
	"fmt.Sprintf":                     {},
	"fmt.Sprint":                      {},
	"sort.Reverse":                    {},
	"strings.Map":                     {},
	"strings.Repeat":                  {},
	"strings.Replace":                 {},
	"strings.Title":                   {},
	"strings.ToLower":                 {},
	"strings.ToLowerSpecial":          {},
	"strings.ToTitle":                 {},
	"strings.ToTitleSpecial":          {},
	"strings.ToUpper":                 {},
	"strings.ToUpperSpecial":          {},
	"strings.Trim":                    {},
	"strings.TrimFunc":                {},
	"strings.TrimLeft":                {},
	"strings.TrimLeftFunc":            {},
	"strings.TrimPrefix":              {},
	"strings.TrimRight":               {},
	"strings.TrimRightFunc":           {},
	"strings.TrimSpace":               {},
	"strings.TrimSuffix":              {},
	"(*net/http.Request).WithContext": {},
}
|  | ||||
// purity implements the Purity analyzer. It recursively checks each
// source function's SSA form and exports an IsPure fact when the
// function has only basic-typed parameters, returns at least one
// value, and contains no instruction with observable side effects
// (stores, channel operations, goroutines, panics, or calls to
// impure functions).
func purity(pass *analysis.Pass) (interface{}, error) {
	// seen breaks cycles in the recursive check below.
	seen := map[*ssa.Function]struct{}{}
	ssapkg := pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).Pkg
	var check func(ssafn *ssa.Function) (ret bool)
	check = func(ssafn *ssa.Function) (ret bool) {
		if ssafn.Object() == nil {
			// TODO(dh): support closures
			return false
		}
		// A fact exported by a dependency settles the question.
		if pass.ImportObjectFact(ssafn.Object(), new(IsPure)) {
			return true
		}
		if ssafn.Pkg != ssapkg {
			// Function is in another package but wasn't marked as
			// pure, ergo it isn't pure
			return false
		}
		// Break recursion
		if _, ok := seen[ssafn]; ok {
			return false
		}

		seen[ssafn] = struct{}{}
		// Export the fact on success so later checks (including
		// recursive ones from other functions) can reuse the verdict.
		defer func() {
			if ret {
				pass.ExportObjectFact(ssafn.Object(), &IsPure{})
			}
		}()

		if functions.IsStub(ssafn) {
			return false
		}

		// Whitelisted standard-library functions are pure by fiat.
		if _, ok := pureStdlib[ssafn.Object().(*types.Func).FullName()]; ok {
			return true
		}

		if ssafn.Signature.Results().Len() == 0 {
			// A function with no return values is empty or is doing some
			// work we cannot see (for example because of build tags);
			// don't consider it pure.
			return false
		}

		// Non-basic parameters (pointers, slices, maps, ...) could be
		// used to observe or cause side effects; reject them.
		for _, param := range ssafn.Params {
			if _, ok := param.Type().Underlying().(*types.Basic); !ok {
				return false
			}
		}

		// No body available (e.g. external function): can't prove
		// anything.
		if ssafn.Blocks == nil {
			return false
		}
		// checkCall reports whether a call is compatible with purity:
		// static self-recursion, calls to (recursively) pure
		// functions, or the len/cap/make/new builtins.
		checkCall := func(common *ssa.CallCommon) bool {
			if common.IsInvoke() {
				// Dynamic interface dispatch — callee unknown.
				return false
			}
			builtin, ok := common.Value.(*ssa.Builtin)
			if !ok {
				if common.StaticCallee() != ssafn {
					if common.StaticCallee() == nil {
						return false
					}
					if !check(common.StaticCallee()) {
						return false
					}
				}
			} else {
				switch builtin.Name() {
				case "len", "cap", "make", "new":
				default:
					return false
				}
			}
			return true
		}
		// Scan every instruction for effects that rule out purity.
		for _, b := range ssafn.Blocks {
			for _, ins := range b.Instrs {
				switch ins := ins.(type) {
				case *ssa.Call:
					if !checkCall(ins.Common()) {
						return false
					}
				case *ssa.Defer:
					if !checkCall(&ins.Call) {
						return false
					}
				case *ssa.Select:
					return false
				case *ssa.Send:
					return false
				case *ssa.Go:
					return false
				case *ssa.Panic:
					return false
				case *ssa.Store:
					return false
				case *ssa.FieldAddr:
					return false
				case *ssa.UnOp:
					// Unary * (load through pointer) or & — touches
					// memory we can't account for.
					if ins.Op == token.MUL || ins.Op == token.AND {
						return false
					}
				}
			}
		}
		return true
	}
	for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
		check(ssafn)
	}

	// Aggregate all exported facts into the result map.
	out := PurityResult{}
	for _, fact := range pass.AllObjectFacts() {
		out[fact.Object.(*types.Func)] = fact.Fact.(*IsPure)
	}
	return out, nil
}
							
								
								
									
										24
									
								
								vendor/honnef.co/go/tools/facts/token.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								vendor/honnef.co/go/tools/facts/token.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
| package facts | ||||
|  | ||||
| import ( | ||||
| 	"go/ast" | ||||
| 	"go/token" | ||||
| 	"reflect" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| ) | ||||
|  | ||||
// TokenFile is an analyzer whose result maps each *token.File in the
// pass's FileSet to the parsed *ast.File it belongs to.
var TokenFile = &analysis.Analyzer{
	Name: "tokenfileanalyzer",
	Doc:  "creates a mapping of *token.File to *ast.File",
	Run: func(pass *analysis.Pass) (interface{}, error) {
		m := map[*token.File]*ast.File{}
		for _, af := range pass.Files {
			tf := pass.Fset.File(af.Pos())
			m[tf] = af
		}
		return m, nil
	},
	RunDespiteErrors: true,
	ResultType:       reflect.TypeOf(map[*token.File]*ast.File{}),
}
							
								
								
									
										28
									
								
								vendor/honnef.co/go/tools/functions/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										28
									
								
								vendor/honnef.co/go/tools/functions/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,28 @@ | ||||
# Auto-managed Bazel rules for the vendored honnef.co/go/tools/functions
# package; srcs must stay in sync with the Go files in this directory.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "loops.go",
        "pure.go",
        "terminates.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/functions",
    importpath = "honnef.co/go/tools/functions",
    visibility = ["//visibility:public"],
    deps = ["//vendor/honnef.co/go/tools/ssa:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										54
									
								
								vendor/honnef.co/go/tools/functions/loops.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										54
									
								
								vendor/honnef.co/go/tools/functions/loops.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,54 @@ | ||||
| package functions | ||||
|  | ||||
| import "honnef.co/go/tools/ssa" | ||||
|  | ||||
// Loop is the set of basic blocks making up one natural loop,
// including its header.
type Loop struct{ ssa.BlockSet }

// FindLoops returns the natural loops of fn. It walks the dominator
// tree in preorder; each predecessor n of a block h that h dominates
// is a back edge, and the loop consists of h, n, and all transitive
// predecessors of n up to (and excluding paths through) h. Returns
// nil for functions without a body.
func FindLoops(fn *ssa.Function) []Loop {
	if fn.Blocks == nil {
		return nil
	}
	tree := fn.DomPreorder()
	var sets []Loop
	for _, h := range tree {
		for _, n := range h.Preds {
			if !h.Dominates(n) {
				continue
			}
			// n is a back-edge to h
			// h is the loop header
			if n == h {
				// Self-loop: the single block is the whole loop.
				set := Loop{}
				set.Add(n)
				sets = append(sets, set)
				continue
			}
			set := Loop{}
			set.Add(h)
			set.Add(n)
			for _, b := range allPredsBut(n, h, nil) {
				set.Add(b)
			}
			sets = append(sets, set)
		}
	}
	return sets
}
|  | ||||
| func allPredsBut(b, but *ssa.BasicBlock, list []*ssa.BasicBlock) []*ssa.BasicBlock { | ||||
| outer: | ||||
| 	for _, pred := range b.Preds { | ||||
| 		if pred == but { | ||||
| 			continue | ||||
| 		} | ||||
| 		for _, p := range list { | ||||
| 			// TODO improve big-o complexity of this function | ||||
| 			if pred == p { | ||||
| 				continue outer | ||||
| 			} | ||||
| 		} | ||||
| 		list = append(list, pred) | ||||
| 		list = allPredsBut(pred, but, list) | ||||
| 	} | ||||
| 	return list | ||||
| } | ||||
							
								
								
									
										46
									
								
								vendor/honnef.co/go/tools/functions/pure.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										46
									
								
								vendor/honnef.co/go/tools/functions/pure.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,46 @@ | ||||
| package functions | ||||
|  | ||||
| import ( | ||||
| 	"honnef.co/go/tools/ssa" | ||||
| ) | ||||
|  | ||||
| func filterDebug(instr []ssa.Instruction) []ssa.Instruction { | ||||
| 	var out []ssa.Instruction | ||||
| 	for _, ins := range instr { | ||||
| 		if _, ok := ins.(*ssa.DebugRef); !ok { | ||||
| 			out = append(out, ins) | ||||
| 		} | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| // IsStub reports whether a function is a stub. A function is | ||||
| // considered a stub if it has no instructions or exactly one | ||||
| // instruction, which must be either returning only constant values or | ||||
| // a panic. | ||||
| func IsStub(fn *ssa.Function) bool { | ||||
| 	if len(fn.Blocks) == 0 { | ||||
| 		return true | ||||
| 	} | ||||
| 	if len(fn.Blocks) > 1 { | ||||
| 		return false | ||||
| 	} | ||||
| 	instrs := filterDebug(fn.Blocks[0].Instrs) | ||||
| 	if len(instrs) != 1 { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	switch instrs[0].(type) { | ||||
| 	case *ssa.Return: | ||||
| 		// Since this is the only instruction, the return value must | ||||
| 		// be a constant. We consider all constants as stubs, not just | ||||
| 		// the zero value. This does not, unfortunately, cover zero | ||||
| 		// initialised structs, as these cause additional | ||||
| 		// instructions. | ||||
| 		return true | ||||
| 	case *ssa.Panic: | ||||
| 		return true | ||||
| 	default: | ||||
| 		return false | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										24
									
								
								vendor/honnef.co/go/tools/functions/terminates.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								vendor/honnef.co/go/tools/functions/terminates.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
| package functions | ||||
|  | ||||
| import "honnef.co/go/tools/ssa" | ||||
|  | ||||
| // Terminates reports whether fn is supposed to return, that is if it | ||||
| // has at least one theoretic path that returns from the function. | ||||
| // Explicit panics do not count as terminating. | ||||
| func Terminates(fn *ssa.Function) bool { | ||||
| 	if fn.Blocks == nil { | ||||
| 		// assuming that a function terminates is the conservative | ||||
| 		// choice | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	for _, block := range fn.Blocks { | ||||
| 		if len(block.Instrs) == 0 { | ||||
| 			continue | ||||
| 		} | ||||
| 		if _, ok := block.Instrs[len(block.Instrs)-1].(*ssa.Return); ok { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
							
								
								
									
										31
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										31
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,31 @@ | ||||
# Auto-managed Bazel rules for the vendored
# honnef.co/go/tools/go/types/typeutil package; srcs must stay in sync
# with the Go files in this directory.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = [
        "callee.go",
        "identical.go",
        "imports.go",
        "map.go",
        "methodsetcache.go",
        "ui.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/go/types/typeutil",
    importpath = "honnef.co/go/tools/go/types/typeutil",
    visibility = ["//visibility:public"],
    deps = ["//vendor/golang.org/x/tools/go/ast/astutil:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										46
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/callee.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										46
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/callee.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,46 @@ | ||||
| // Copyright 2018 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package typeutil | ||||
|  | ||||
| import ( | ||||
| 	"go/ast" | ||||
| 	"go/types" | ||||
|  | ||||
| 	"golang.org/x/tools/go/ast/astutil" | ||||
| ) | ||||
|  | ||||
| // Callee returns the named target of a function call, if any: | ||||
| // a function, method, builtin, or variable. | ||||
| func Callee(info *types.Info, call *ast.CallExpr) types.Object { | ||||
| 	var obj types.Object | ||||
| 	switch fun := astutil.Unparen(call.Fun).(type) { | ||||
| 	case *ast.Ident: | ||||
| 		obj = info.Uses[fun] // type, var, builtin, or declared func | ||||
| 	case *ast.SelectorExpr: | ||||
| 		if sel, ok := info.Selections[fun]; ok { | ||||
| 			obj = sel.Obj() // method or field | ||||
| 		} else { | ||||
| 			obj = info.Uses[fun.Sel] // qualified identifier? | ||||
| 		} | ||||
| 	} | ||||
| 	if _, ok := obj.(*types.TypeName); ok { | ||||
| 		return nil // T(x) is a conversion, not a call | ||||
| 	} | ||||
| 	return obj | ||||
| } | ||||
|  | ||||
| // StaticCallee returns the target (function or method) of a static | ||||
| // function call, if any. It returns nil for calls to builtins. | ||||
| func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func { | ||||
| 	if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) { | ||||
| 		return f | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func interfaceMethod(f *types.Func) bool { | ||||
| 	recv := f.Type().(*types.Signature).Recv() | ||||
| 	return recv != nil && types.IsInterface(recv.Type()) | ||||
| } | ||||
							
								
								
									
										75
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/identical.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										75
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/identical.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,75 @@ | ||||
| package typeutil | ||||
|  | ||||
| import ( | ||||
| 	"go/types" | ||||
| ) | ||||
|  | ||||
| // Identical reports whether x and y are identical types. | ||||
| // Unlike types.Identical, receivers of Signature types are not ignored. | ||||
| // Unlike types.Identical, interfaces are compared via pointer equality (except for the empty interface, which gets deduplicated). | ||||
| // Unlike types.Identical, structs are compared via pointer equality. | ||||
| func Identical(x, y types.Type) (ret bool) { | ||||
| 	if !types.Identical(x, y) { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	switch x := x.(type) { | ||||
| 	case *types.Struct: | ||||
| 		y, ok := y.(*types.Struct) | ||||
| 		if !ok { | ||||
| 			// should be impossible | ||||
| 			return true | ||||
| 		} | ||||
| 		return x == y | ||||
| 	case *types.Interface: | ||||
| 		// The issue with interfaces, typeutil.Map and types.Identical | ||||
| 		// | ||||
| 		// types.Identical, when comparing two interfaces, only looks at the set | ||||
| 		// of all methods, not differentiating between implicit (embedded) and | ||||
| 		// explicit methods. | ||||
| 		// | ||||
| 		// When we see the following two types, in source order | ||||
| 		// | ||||
| 		// type I1 interface { foo() } | ||||
| 		// type I2 interface { I1 } | ||||
| 		// | ||||
| 		// then we will first correctly process I1 and its underlying type. When | ||||
| 		// we get to I2, we will see that its underlying type is identical to | ||||
| 		// that of I1 and not process it again. This, however, means that we will | ||||
| 		// not record the fact that I2 embeds I1. If only I2 is reachable via the | ||||
| 		// graph root, then I1 will not be considered used. | ||||
| 		// | ||||
| 		// We choose to be lazy and compare interfaces by their | ||||
| 		// pointers. This will obviously miss identical interfaces, | ||||
| 		// but this only has a runtime cost, it doesn't affect | ||||
| 		// correctness. | ||||
| 		y, ok := y.(*types.Interface) | ||||
| 		if !ok { | ||||
| 			// should be impossible | ||||
| 			return true | ||||
| 		} | ||||
| 		if x.NumEmbeddeds() == 0 && | ||||
| 			y.NumEmbeddeds() == 0 && | ||||
| 			x.NumMethods() == 0 && | ||||
| 			y.NumMethods() == 0 { | ||||
| 			// all truly empty interfaces are the same | ||||
| 			return true | ||||
| 		} | ||||
| 		return x == y | ||||
| 	case *types.Signature: | ||||
| 		y, ok := y.(*types.Signature) | ||||
| 		if !ok { | ||||
| 			// should be impossible | ||||
| 			return true | ||||
| 		} | ||||
| 		if x.Recv() == y.Recv() { | ||||
| 			return true | ||||
| 		} | ||||
| 		if x.Recv() == nil || y.Recv() == nil { | ||||
| 			return false | ||||
| 		} | ||||
| 		return Identical(x.Recv().Type(), y.Recv().Type()) | ||||
| 	default: | ||||
| 		return true | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										31
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/imports.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										31
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/imports.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,31 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package typeutil | ||||
|  | ||||
| import "go/types" | ||||
|  | ||||
| // Dependencies returns all dependencies of the specified packages. | ||||
| // | ||||
| // Dependent packages appear in topological order: if package P imports | ||||
| // package Q, Q appears earlier than P in the result. | ||||
| // The algorithm follows import statements in the order they | ||||
| // appear in the source code, so the result is a total order. | ||||
| // | ||||
| func Dependencies(pkgs ...*types.Package) []*types.Package { | ||||
| 	var result []*types.Package | ||||
| 	seen := make(map[*types.Package]bool) | ||||
| 	var visit func(pkgs []*types.Package) | ||||
| 	visit = func(pkgs []*types.Package) { | ||||
| 		for _, p := range pkgs { | ||||
| 			if !seen[p] { | ||||
| 				seen[p] = true | ||||
| 				visit(p.Imports()) | ||||
| 				result = append(result, p) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	visit(pkgs) | ||||
| 	return result | ||||
| } | ||||
							
								
								
									
										319
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/map.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										319
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/map.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,319 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package typeutil defines various utilities for types, such as Map, | ||||
| // a mapping from types.Type to interface{} values. | ||||
| package typeutil | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"go/types" | ||||
| 	"reflect" | ||||
| ) | ||||
|  | ||||
| // Map is a hash-table-based mapping from types (types.Type) to | ||||
| // arbitrary interface{} values.  The concrete types that implement | ||||
| // the Type interface are pointers.  Since they are not canonicalized, | ||||
| // == cannot be used to check for equivalence, and thus we cannot | ||||
| // simply use a Go map. | ||||
| // | ||||
| // Just as with map[K]V, a nil *Map is a valid empty map. | ||||
| // | ||||
| // Not thread-safe. | ||||
| // | ||||
| // This fork handles Signatures correctly, respecting method | ||||
| // receivers. Furthermore, it doesn't deduplicate interfaces or | ||||
| // structs. Interfaces aren't deduplicated as not to conflate implicit | ||||
| // and explicit methods. Structs aren't deduplicated because we track | ||||
| // fields of each type separately. | ||||
| // | ||||
type Map struct {
	hasher Hasher             // shared by many Maps; lazily created by Set if never set explicitly
	table  map[uint32][]entry // maps hash to bucket; entry.key==nil means unused (deleted) slot
	length int                // number of live map entries
}

// entry is an entry (key/value association) in a hash bucket.
// A zero entry (key==nil) marks a deleted slot that Set may reuse.
type entry struct {
	key   types.Type
	value interface{}
}
|  | ||||
| // SetHasher sets the hasher used by Map. | ||||
| // | ||||
| // All Hashers are functionally equivalent but contain internal state | ||||
| // used to cache the results of hashing previously seen types. | ||||
| // | ||||
| // A single Hasher created by MakeHasher() may be shared among many | ||||
| // Maps.  This is recommended if the instances have many keys in | ||||
| // common, as it will amortize the cost of hash computation. | ||||
| // | ||||
| // A Hasher may grow without bound as new types are seen.  Even when a | ||||
| // type is deleted from the map, the Hasher never shrinks, since other | ||||
| // types in the map may reference the deleted type indirectly. | ||||
| // | ||||
| // Hashers are not thread-safe, and read-only operations such as | ||||
| // Map.Lookup require updates to the hasher, so a full Mutex lock (not a | ||||
| // read-lock) is require around all Map operations if a shared | ||||
| // hasher is accessed from multiple threads. | ||||
| // | ||||
| // If SetHasher is not called, the Map will create a private hasher at | ||||
| // the first call to Insert. | ||||
| // | ||||
func (m *Map) SetHasher(hasher Hasher) {
	m.hasher = hasher // replaces the private hasher Set would otherwise create lazily
}
|  | ||||
| // Delete removes the entry with the given key, if any. | ||||
| // It returns true if the entry was found. | ||||
| // | ||||
| func (m *Map) Delete(key types.Type) bool { | ||||
| 	if m != nil && m.table != nil { | ||||
| 		hash := m.hasher.Hash(key) | ||||
| 		bucket := m.table[hash] | ||||
| 		for i, e := range bucket { | ||||
| 			if e.key != nil && Identical(key, e.key) { | ||||
| 				// We can't compact the bucket as it | ||||
| 				// would disturb iterators. | ||||
| 				bucket[i] = entry{} | ||||
| 				m.length-- | ||||
| 				return true | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| // At returns the map entry for the given key. | ||||
| // The result is nil if the entry is not present. | ||||
| // | ||||
| func (m *Map) At(key types.Type) interface{} { | ||||
| 	if m != nil && m.table != nil { | ||||
| 		for _, e := range m.table[m.hasher.Hash(key)] { | ||||
| 			if e.key != nil && Identical(key, e.key) { | ||||
| 				return e.value | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Set sets the map entry for key to val, | ||||
| // and returns the previous entry, if any. | ||||
| func (m *Map) Set(key types.Type, value interface{}) (prev interface{}) { | ||||
| 	if m.table != nil { | ||||
| 		hash := m.hasher.Hash(key) | ||||
| 		bucket := m.table[hash] | ||||
| 		var hole *entry | ||||
| 		for i, e := range bucket { | ||||
| 			if e.key == nil { | ||||
| 				hole = &bucket[i] | ||||
| 			} else if Identical(key, e.key) { | ||||
| 				prev = e.value | ||||
| 				bucket[i].value = value | ||||
| 				return | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if hole != nil { | ||||
| 			*hole = entry{key, value} // overwrite deleted entry | ||||
| 		} else { | ||||
| 			m.table[hash] = append(bucket, entry{key, value}) | ||||
| 		} | ||||
| 	} else { | ||||
| 		if m.hasher.memo == nil { | ||||
| 			m.hasher = MakeHasher() | ||||
| 		} | ||||
| 		hash := m.hasher.Hash(key) | ||||
| 		m.table = map[uint32][]entry{hash: {entry{key, value}}} | ||||
| 	} | ||||
|  | ||||
| 	m.length++ | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // Len returns the number of map entries. | ||||
| func (m *Map) Len() int { | ||||
| 	if m != nil { | ||||
| 		return m.length | ||||
| 	} | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| // Iterate calls function f on each entry in the map in unspecified order. | ||||
| // | ||||
| // If f should mutate the map, Iterate provides the same guarantees as | ||||
| // Go maps: if f deletes a map entry that Iterate has not yet reached, | ||||
| // f will not be invoked for it, but if f inserts a map entry that | ||||
| // Iterate has not yet reached, whether or not f will be invoked for | ||||
| // it is unspecified. | ||||
| // | ||||
| func (m *Map) Iterate(f func(key types.Type, value interface{})) { | ||||
| 	if m != nil { | ||||
| 		for _, bucket := range m.table { | ||||
| 			for _, e := range bucket { | ||||
| 				if e.key != nil { | ||||
| 					f(e.key, e.value) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Keys returns a new slice containing the set of map keys. | ||||
| // The order is unspecified. | ||||
| func (m *Map) Keys() []types.Type { | ||||
| 	keys := make([]types.Type, 0, m.Len()) | ||||
| 	m.Iterate(func(key types.Type, _ interface{}) { | ||||
| 		keys = append(keys, key) | ||||
| 	}) | ||||
| 	return keys | ||||
| } | ||||
|  | ||||
| func (m *Map) toString(values bool) string { | ||||
| 	if m == nil { | ||||
| 		return "{}" | ||||
| 	} | ||||
| 	var buf bytes.Buffer | ||||
| 	fmt.Fprint(&buf, "{") | ||||
| 	sep := "" | ||||
| 	m.Iterate(func(key types.Type, value interface{}) { | ||||
| 		fmt.Fprint(&buf, sep) | ||||
| 		sep = ", " | ||||
| 		fmt.Fprint(&buf, key) | ||||
| 		if values { | ||||
| 			fmt.Fprintf(&buf, ": %q", value) | ||||
| 		} | ||||
| 	}) | ||||
| 	fmt.Fprint(&buf, "}") | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
| // String returns a string representation of the map's entries. | ||||
| // Values are printed using fmt.Sprintf("%v", v). | ||||
| // Order is unspecified. | ||||
| // | ||||
func (m *Map) String() string {
	return m.toString(true) // keys and values
}
|  | ||||
| // KeysString returns a string representation of the map's key set. | ||||
| // Order is unspecified. | ||||
| // | ||||
func (m *Map) KeysString() string {
	return m.toString(false) // keys only
}
|  | ||||
| //////////////////////////////////////////////////////////////////////// | ||||
| // Hasher | ||||
|  | ||||
| // A Hasher maps each type to its hash value. | ||||
| // For efficiency, a hasher uses memoization; thus its memory | ||||
| // footprint grows monotonically over time. | ||||
| // Hashers are not thread-safe. | ||||
| // Hashers have reference semantics. | ||||
| // Call MakeHasher to create a Hasher. | ||||
type Hasher struct {
	memo map[types.Type]uint32 // memoization table: type -> hash; grows monotonically, never shrinks
}

// MakeHasher returns a new Hasher instance.
func MakeHasher() Hasher {
	return Hasher{make(map[types.Type]uint32)}
}
|  | ||||
| // Hash computes a hash value for the given type t such that | ||||
| // Identical(t, t') => Hash(t) == Hash(t'). | ||||
| func (h Hasher) Hash(t types.Type) uint32 { | ||||
| 	hash, ok := h.memo[t] | ||||
| 	if !ok { | ||||
| 		hash = h.hashFor(t) | ||||
| 		h.memo[t] = hash | ||||
| 	} | ||||
| 	return hash | ||||
| } | ||||
|  | ||||
// hashString computes a 32-bit multiplicative hash of s using the
// FNV-1a step function (xor each byte, then multiply by the 32-bit FNV
// prime), starting from a zero value rather than the FNV offset basis.
func hashString(s string) uint32 {
	const prime32 = 16777619
	var h uint32
	for _, b := range []byte(s) {
		h = (h ^ uint32(b)) * prime32
	}
	return h
}
|  | ||||
// hashFor computes the hash of t.
// Each case mixes a distinct prime constant with the hashes of the
// type's components, so structurally different kinds of type are
// unlikely to collide.
func (h Hasher) hashFor(t types.Type) uint32 {
	// See Identical for rationale.
	switch t := t.(type) {
	case *types.Basic:
		return uint32(t.Kind())

	case *types.Array:
		return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())

	case *types.Slice:
		return 9049 + 2*h.Hash(t.Elem())

	case *types.Struct:
		// Field order, names, tags, anonymity, and types all contribute.
		var hash uint32 = 9059
		for i, n := 0, t.NumFields(); i < n; i++ {
			f := t.Field(i)
			if f.Anonymous() {
				hash += 8861
			}
			hash += hashString(t.Tag(i))
			hash += hashString(f.Name()) // (ignore f.Pkg)
			hash += h.Hash(f.Type())
		}
		return hash

	case *types.Pointer:
		return 9067 + 2*h.Hash(t.Elem())

	case *types.Signature:
		// Note: the receiver is deliberately NOT hashed here; only
		// parameters, results, and variadicity contribute.
		var hash uint32 = 9091
		if t.Variadic() {
			hash *= 8863
		}
		return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())

	case *types.Interface:
		var hash uint32 = 9103
		for i, n := 0, t.NumMethods(); i < n; i++ {
			// See go/types.identicalMethods for rationale.
			// Method order is not significant.
			// Ignore m.Pkg().
			m := t.Method(i)
			hash += 3*hashString(m.Name()) + 5*h.Hash(m.Type())
		}
		return hash

	case *types.Map:
		return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())

	case *types.Chan:
		return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())

	case *types.Named:
		// Not safe with a copying GC; objects may move.
		return uint32(reflect.ValueOf(t.Obj()).Pointer())

	case *types.Tuple:
		return h.hashTuple(t)
	}
	// Unreachable for well-formed types; any new types.Type kind must
	// be handled above.
	panic(t)
}
|  | ||||
| func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { | ||||
| 	// See go/types.identicalTypes for rationale. | ||||
| 	n := tuple.Len() | ||||
| 	var hash uint32 = 9137 + 2*uint32(n) | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		hash += 3 * h.Hash(tuple.At(i).Type()) | ||||
| 	} | ||||
| 	return hash | ||||
| } | ||||
							
								
								
									
										72
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										72
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/methodsetcache.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,72 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // This file implements a cache of method sets. | ||||
|  | ||||
| package typeutil | ||||
|  | ||||
| import ( | ||||
| 	"go/types" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // A MethodSetCache records the method set of each type T for which | ||||
| // MethodSet(T) is called so that repeat queries are fast. | ||||
| // The zero value is a ready-to-use cache instance. | ||||
type MethodSetCache struct {
	mu     sync.Mutex                                                 // guards named and others
	named  map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
	others map[types.Type]*types.MethodSet                            // all other types, keyed by pointer identity
}
|  | ||||
| // MethodSet returns the method set of type T.  It is thread-safe. | ||||
| // | ||||
| // If cache is nil, this function is equivalent to types.NewMethodSet(T). | ||||
| // Utility functions can thus expose an optional *MethodSetCache | ||||
| // parameter to clients that care about performance. | ||||
| // | ||||
| func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet { | ||||
| 	if cache == nil { | ||||
| 		return types.NewMethodSet(T) | ||||
| 	} | ||||
| 	cache.mu.Lock() | ||||
| 	defer cache.mu.Unlock() | ||||
|  | ||||
| 	switch T := T.(type) { | ||||
| 	case *types.Named: | ||||
| 		return cache.lookupNamed(T).value | ||||
|  | ||||
| 	case *types.Pointer: | ||||
| 		if N, ok := T.Elem().(*types.Named); ok { | ||||
| 			return cache.lookupNamed(N).pointer | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// all other types | ||||
| 	// (The map uses pointer equivalence, not type identity.) | ||||
| 	mset := cache.others[T] | ||||
| 	if mset == nil { | ||||
| 		mset = types.NewMethodSet(T) | ||||
| 		if cache.others == nil { | ||||
| 			cache.others = make(map[types.Type]*types.MethodSet) | ||||
| 		} | ||||
| 		cache.others[T] = mset | ||||
| 	} | ||||
| 	return mset | ||||
| } | ||||
|  | ||||
| func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } { | ||||
| 	if cache.named == nil { | ||||
| 		cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet }) | ||||
| 	} | ||||
| 	// Avoid recomputing mset(*T) for each distinct Pointer | ||||
| 	// instance whose underlying type is a named type. | ||||
| 	msets, ok := cache.named[named] | ||||
| 	if !ok { | ||||
| 		msets.value = types.NewMethodSet(named) | ||||
| 		msets.pointer = types.NewMethodSet(types.NewPointer(named)) | ||||
| 		cache.named[named] = msets | ||||
| 	} | ||||
| 	return msets | ||||
| } | ||||
							
								
								
									
										52
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/ui.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										52
									
								
								vendor/honnef.co/go/tools/go/types/typeutil/ui.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,52 @@ | ||||
| // Copyright 2014 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package typeutil | ||||
|  | ||||
| // This file defines utilities for user interfaces that display types. | ||||
|  | ||||
| import "go/types" | ||||
|  | ||||
| // IntuitiveMethodSet returns the intuitive method set of a type T, | ||||
| // which is the set of methods you can call on an addressable value of | ||||
| // that type. | ||||
| // | ||||
| // The result always contains MethodSet(T), and is exactly MethodSet(T) | ||||
| // for interface types and for pointer-to-concrete types. | ||||
| // For all other concrete types T, the result additionally | ||||
| // contains each method belonging to *T if there is no identically | ||||
| // named method on T itself. | ||||
| // | ||||
| // This corresponds to user intuition about method sets; | ||||
| // this function is intended only for user interfaces. | ||||
| // | ||||
| // The order of the result is as for types.MethodSet(T). | ||||
| // | ||||
| func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection { | ||||
| 	isPointerToConcrete := func(T types.Type) bool { | ||||
| 		ptr, ok := T.(*types.Pointer) | ||||
| 		return ok && !types.IsInterface(ptr.Elem()) | ||||
| 	} | ||||
|  | ||||
| 	var result []*types.Selection | ||||
| 	mset := msets.MethodSet(T) | ||||
| 	if types.IsInterface(T) || isPointerToConcrete(T) { | ||||
| 		for i, n := 0, mset.Len(); i < n; i++ { | ||||
| 			result = append(result, mset.At(i)) | ||||
| 		} | ||||
| 	} else { | ||||
| 		// T is some other concrete type. | ||||
| 		// Report methods of T and *T, preferring those of T. | ||||
| 		pmset := msets.MethodSet(types.NewPointer(T)) | ||||
| 		for i, n := 0, pmset.Len(); i < n; i++ { | ||||
| 			meth := pmset.At(i) | ||||
| 			if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil { | ||||
| 				meth = m | ||||
| 			} | ||||
| 			result = append(result, meth) | ||||
| 		} | ||||
|  | ||||
| 	} | ||||
| 	return result | ||||
| } | ||||
							
								
								
									
										28
									
								
								vendor/honnef.co/go/tools/internal/cache/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										28
									
								
								vendor/honnef.co/go/tools/internal/cache/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,28 @@ | ||||
| load("@io_bazel_rules_go//go:def.bzl", "go_library") | ||||
|  | ||||
| go_library( | ||||
|     name = "go_default_library", | ||||
|     srcs = [ | ||||
|         "cache.go", | ||||
|         "default.go", | ||||
|         "hash.go", | ||||
|     ], | ||||
|     importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/internal/cache", | ||||
|     importpath = "honnef.co/go/tools/internal/cache", | ||||
|     visibility = ["//vendor/honnef.co/go/tools:__subpackages__"], | ||||
|     deps = ["//vendor/honnef.co/go/tools/internal/renameio:go_default_library"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "package-srcs", | ||||
|     srcs = glob(["**"]), | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:private"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "all-srcs", | ||||
|     srcs = [":package-srcs"], | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:public"], | ||||
| ) | ||||
							
								
								
									
										474
									
								
								vendor/honnef.co/go/tools/internal/cache/cache.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										474
									
								
								vendor/honnef.co/go/tools/internal/cache/cache.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,474 @@ | ||||
| // Copyright 2017 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package cache implements a build artifact cache. | ||||
| // | ||||
| // This package is a slightly modified fork of Go's | ||||
| // cmd/go/internal/cache package. | ||||
| package cache | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"crypto/sha256" | ||||
| 	"encoding/hex" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"honnef.co/go/tools/internal/renameio" | ||||
| ) | ||||
|  | ||||
// An ActionID is a cache action key, the hash of a complete description of a
// repeatable computation (command line, environment variables,
// input file contents, executable contents).
type ActionID [HashSize]byte

// An OutputID is a cache output key, the hash of an output of a computation.
type OutputID [HashSize]byte

// A Cache is a package cache, backed by a file system directory tree.
type Cache struct {
	dir string           // root directory; contains 256 two-hex-digit fan-out subdirectories
	now func() time.Time // clock source, indirected — presumably so tests can substitute a fake clock; confirm
}
|  | ||||
| // Open opens and returns the cache in the given directory. | ||||
| // | ||||
| // It is safe for multiple processes on a single machine to use the | ||||
| // same cache directory in a local file system simultaneously. | ||||
| // They will coordinate using operating system file locks and may | ||||
| // duplicate effort but will not corrupt the cache. | ||||
| // | ||||
| // However, it is NOT safe for multiple processes on different machines | ||||
| // to share a cache directory (for example, if the directory were stored | ||||
| // in a network file system). File locking is notoriously unreliable in | ||||
| // network file systems and may not suffice to protect the cache. | ||||
| // | ||||
| func Open(dir string) (*Cache, error) { | ||||
| 	info, err := os.Stat(dir) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	if !info.IsDir() { | ||||
| 		return nil, &os.PathError{Op: "open", Path: dir, Err: fmt.Errorf("not a directory")} | ||||
| 	} | ||||
| 	for i := 0; i < 256; i++ { | ||||
| 		name := filepath.Join(dir, fmt.Sprintf("%02x", i)) | ||||
| 		if err := os.MkdirAll(name, 0777); err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| 	c := &Cache{ | ||||
| 		dir: dir, | ||||
| 		now: time.Now, | ||||
| 	} | ||||
| 	return c, nil | ||||
| } | ||||
|  | ||||
| // fileName returns the name of the file corresponding to the given id. | ||||
func (c *Cache) fileName(id [HashSize]byte, key string) string {
	// Layout: dir/<first-byte-hex>/<full-hash-hex>-<key>.
	return filepath.Join(c.dir, fmt.Sprintf("%02x", id[0]), fmt.Sprintf("%x", id)+"-"+key)
}
|  | ||||
// errMissing is the sentinel returned for any entry that cannot be
// found or validated; callers treat every failure mode as a cache miss.
var errMissing = errors.New("cache entry not found")

const (
	// action entry file is "v1 <hex id> <hex out> <decimal size space-padded to 20 bytes> <unixnano space-padded to 20 bytes>\n"
	hexSize   = HashSize * 2
	entrySize = 2 + 1 + hexSize + 1 + hexSize + 1 + 20 + 1 + 20 + 1
)
|  | ||||
| // verify controls whether to run the cache in verify mode. | ||||
| // In verify mode, the cache always returns errMissing from Get | ||||
| // but then double-checks in Put that the data being written | ||||
| // exactly matches any existing entry. This provides an easy | ||||
| // way to detect program behavior that would have been different | ||||
| // had the cache entry been returned from Get. | ||||
| // | ||||
| // verify is enabled by setting the environment variable | ||||
| // GODEBUG=gocacheverify=1. | ||||
var verify = false

// DebugTest is set when GODEBUG=gocachetest=1 is in the environment.
var DebugTest = false

// init parses GODEBUG once at package load time.
func init() { initEnv() }
|  | ||||
| func initEnv() { | ||||
| 	verify = false | ||||
| 	debugHash = false | ||||
| 	debug := strings.Split(os.Getenv("GODEBUG"), ",") | ||||
| 	for _, f := range debug { | ||||
| 		if f == "gocacheverify=1" { | ||||
| 			verify = true | ||||
| 		} | ||||
| 		if f == "gocachehash=1" { | ||||
| 			debugHash = true | ||||
| 		} | ||||
| 		if f == "gocachetest=1" { | ||||
| 			DebugTest = true | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Get looks up the action ID in the cache, | ||||
| // returning the corresponding output ID and file size, if any. | ||||
| // Note that finding an output ID does not guarantee that the | ||||
| // saved file for that output ID is still available. | ||||
func (c *Cache) Get(id ActionID) (Entry, error) {
	if verify {
		// Verify mode: force a miss so Put can cross-check the
		// recomputed output against any existing entry.
		return Entry{}, errMissing
	}
	return c.get(id)
}
|  | ||||
// An Entry is the result of a successful cache index lookup.
type Entry struct {
	OutputID OutputID  // hash of the cached output
	Size     int64     // size of the cached output in bytes
	Time     time.Time // timestamp recorded in the index line (unixnano)
}
|  | ||||
| // get is Get but does not respect verify mode, so that Put can use it. | ||||
| func (c *Cache) get(id ActionID) (Entry, error) { | ||||
| 	missing := func() (Entry, error) { | ||||
| 		return Entry{}, errMissing | ||||
| 	} | ||||
| 	f, err := os.Open(c.fileName(id, "a")) | ||||
| 	if err != nil { | ||||
| 		return missing() | ||||
| 	} | ||||
| 	defer f.Close() | ||||
| 	entry := make([]byte, entrySize+1) // +1 to detect whether f is too long | ||||
| 	if n, err := io.ReadFull(f, entry); n != entrySize || err != io.ErrUnexpectedEOF { | ||||
| 		return missing() | ||||
| 	} | ||||
| 	if entry[0] != 'v' || entry[1] != '1' || entry[2] != ' ' || entry[3+hexSize] != ' ' || entry[3+hexSize+1+hexSize] != ' ' || entry[3+hexSize+1+hexSize+1+20] != ' ' || entry[entrySize-1] != '\n' { | ||||
| 		return missing() | ||||
| 	} | ||||
| 	eid, entry := entry[3:3+hexSize], entry[3+hexSize:] | ||||
| 	eout, entry := entry[1:1+hexSize], entry[1+hexSize:] | ||||
| 	esize, entry := entry[1:1+20], entry[1+20:] | ||||
| 	//lint:ignore SA4006 See https://github.com/dominikh/go-tools/issues/465 | ||||
| 	etime, entry := entry[1:1+20], entry[1+20:] | ||||
| 	var buf [HashSize]byte | ||||
| 	if _, err := hex.Decode(buf[:], eid); err != nil || buf != id { | ||||
| 		return missing() | ||||
| 	} | ||||
| 	if _, err := hex.Decode(buf[:], eout); err != nil { | ||||
| 		return missing() | ||||
| 	} | ||||
| 	i := 0 | ||||
| 	for i < len(esize) && esize[i] == ' ' { | ||||
| 		i++ | ||||
| 	} | ||||
| 	size, err := strconv.ParseInt(string(esize[i:]), 10, 64) | ||||
| 	if err != nil || size < 0 { | ||||
| 		return missing() | ||||
| 	} | ||||
| 	i = 0 | ||||
| 	for i < len(etime) && etime[i] == ' ' { | ||||
| 		i++ | ||||
| 	} | ||||
| 	tm, err := strconv.ParseInt(string(etime[i:]), 10, 64) | ||||
| 	if err != nil || size < 0 { | ||||
| 		return missing() | ||||
| 	} | ||||
|  | ||||
| 	c.used(c.fileName(id, "a")) | ||||
|  | ||||
| 	return Entry{buf, size, time.Unix(0, tm)}, nil | ||||
| } | ||||
|  | ||||
| // GetFile looks up the action ID in the cache and returns | ||||
| // the name of the corresponding data file. | ||||
| func (c *Cache) GetFile(id ActionID) (file string, entry Entry, err error) { | ||||
| 	entry, err = c.Get(id) | ||||
| 	if err != nil { | ||||
| 		return "", Entry{}, err | ||||
| 	} | ||||
| 	file = c.OutputFile(entry.OutputID) | ||||
| 	info, err := os.Stat(file) | ||||
| 	if err != nil || info.Size() != entry.Size { | ||||
| 		return "", Entry{}, errMissing | ||||
| 	} | ||||
| 	return file, entry, nil | ||||
| } | ||||
|  | ||||
| // GetBytes looks up the action ID in the cache and returns | ||||
| // the corresponding output bytes. | ||||
| // GetBytes should only be used for data that can be expected to fit in memory. | ||||
| func (c *Cache) GetBytes(id ActionID) ([]byte, Entry, error) { | ||||
| 	entry, err := c.Get(id) | ||||
| 	if err != nil { | ||||
| 		return nil, entry, err | ||||
| 	} | ||||
| 	data, _ := ioutil.ReadFile(c.OutputFile(entry.OutputID)) | ||||
| 	if sha256.Sum256(data) != entry.OutputID { | ||||
| 		return nil, entry, errMissing | ||||
| 	} | ||||
| 	return data, entry, nil | ||||
| } | ||||
|  | ||||
| // OutputFile returns the name of the cache file storing output with the given OutputID. | ||||
func (c *Cache) OutputFile(out OutputID) string {
	file := c.fileName(out, "d")
	c.used(file) // refresh mtime so Trim treats this output as recently used
	return file
}
|  | ||||
| // Time constants for cache expiration. | ||||
| // | ||||
| // We set the mtime on a cache file on each use, but at most one per mtimeInterval (1 hour), | ||||
| // to avoid causing many unnecessary inode updates. The mtimes therefore | ||||
| // roughly reflect "time of last use" but may in fact be older by at most an hour. | ||||
| // | ||||
| // We scan the cache for entries to delete at most once per trimInterval (1 day). | ||||
| // | ||||
| // When we do scan the cache, we delete entries that have not been used for | ||||
| // at least trimLimit (5 days). Statistics gathered from a month of usage by | ||||
| // Go developers found that essentially all reuse of cached entries happened | ||||
| // within 5 days of the previous reuse. See golang.org/issue/22990. | ||||
const (
	mtimeInterval = 1 * time.Hour      // minimum spacing between mtime refreshes on a cache file
	trimInterval  = 24 * time.Hour     // minimum spacing between full trim scans
	trimLimit     = 5 * 24 * time.Hour // entries unused for this long are deleted
)
|  | ||||
| // used makes a best-effort attempt to update mtime on file, | ||||
| // so that mtime reflects cache access time. | ||||
| // | ||||
| // Because the reflection only needs to be approximate, | ||||
| // and to reduce the amount of disk activity caused by using | ||||
| // cache entries, used only updates the mtime if the current | ||||
| // mtime is more than an hour old. This heuristic eliminates | ||||
| // nearly all of the mtime updates that would otherwise happen, | ||||
| // while still keeping the mtimes useful for cache trimming. | ||||
| func (c *Cache) used(file string) { | ||||
| 	info, err := os.Stat(file) | ||||
| 	if err == nil && c.now().Sub(info.ModTime()) < mtimeInterval { | ||||
| 		return | ||||
| 	} | ||||
| 	os.Chtimes(file, c.now(), c.now()) | ||||
| } | ||||
|  | ||||
// Trim removes old cache entries that are likely not to be reused.
func (c *Cache) Trim() {
	now := c.now()

	// We maintain in dir/trim.txt the time of the last completed cache trim.
	// If the cache has been trimmed recently enough, do nothing.
	// This is the common case.
	data, _ := ioutil.ReadFile(filepath.Join(c.dir, "trim.txt"))
	t, err := strconv.ParseInt(strings.TrimSpace(string(data)), 10, 64)
	if err == nil && now.Sub(time.Unix(t, 0)) < trimInterval {
		return
	}

	// Trim each of the 256 subdirectories.
	// We subtract an additional mtimeInterval
	// to account for the imprecision of our "last used" mtimes.
	cutoff := now.Add(-trimLimit - mtimeInterval)
	for i := 0; i < 256; i++ {
		subdir := filepath.Join(c.dir, fmt.Sprintf("%02x", i))
		c.trimSubdir(subdir, cutoff)
	}

	// Ignore errors from here: if we don't write the complete timestamp, the
	// cache will appear older than it is, and we'll trim it again next time.
	// NOTE(review): renameio appears to write via rename for atomicity — confirm.
	renameio.WriteFile(filepath.Join(c.dir, "trim.txt"), []byte(fmt.Sprintf("%d", now.Unix())))
}
|  | ||||
| // trimSubdir trims a single cache subdirectory. | ||||
| func (c *Cache) trimSubdir(subdir string, cutoff time.Time) { | ||||
| 	// Read all directory entries from subdir before removing | ||||
| 	// any files, in case removing files invalidates the file offset | ||||
| 	// in the directory scan. Also, ignore error from f.Readdirnames, | ||||
| 	// because we don't care about reporting the error and we still | ||||
| 	// want to process any entries found before the error. | ||||
| 	f, err := os.Open(subdir) | ||||
| 	if err != nil { | ||||
| 		return | ||||
| 	} | ||||
| 	names, _ := f.Readdirnames(-1) | ||||
| 	f.Close() | ||||
|  | ||||
| 	for _, name := range names { | ||||
| 		// Remove only cache entries (xxxx-a and xxxx-d). | ||||
| 		if !strings.HasSuffix(name, "-a") && !strings.HasSuffix(name, "-d") { | ||||
| 			continue | ||||
| 		} | ||||
| 		entry := filepath.Join(subdir, name) | ||||
| 		info, err := os.Stat(entry) | ||||
| 		if err == nil && info.ModTime().Before(cutoff) { | ||||
| 			os.Remove(entry) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// putIndexEntry adds an entry to the cache recording that executing the action
// with the given id produces an output with the given output id (hash) and size.
// The entry is written to the action file c.fileName(id, "a"); in verify mode
// (GODEBUG=gocacheverify=1) a mismatch with the existing entry panics.
func (c *Cache) putIndexEntry(id ActionID, out OutputID, size int64, allowVerify bool) error {
	// Note: We expect that for one reason or another it may happen
	// that repeating an action produces a different output hash
	// (for example, if the output contains a time stamp or temp dir name).
	// While not ideal, this is also not a correctness problem, so we
	// don't make a big deal about it. In particular, we leave the action
	// cache entries writable specifically so that they can be overwritten.
	//
	// Setting GODEBUG=gocacheverify=1 does make a big deal:
	// in verify mode we are double-checking that the cache entries
	// are entirely reproducible. As just noted, this may be unrealistic
	// in some cases but the check is also useful for shaking out real bugs.
	//
	// Fixed-width entry format: "v1 <actionID> <outputID> <size> <nanotime>\n".
	entry := []byte(fmt.Sprintf("v1 %x %x %20d %20d\n", id, out, size, time.Now().UnixNano()))
	if verify && allowVerify {
		old, err := c.get(id)
		if err == nil && (old.OutputID != out || old.Size != size) {
			// panic to show stack trace, so we can see what code is generating this cache entry.
			msg := fmt.Sprintf("go: internal cache error: cache verify failed: id=%x changed:<<<\n%s\n>>>\nold: %x %d\nnew: %x %d", id, reverseHash(id), out, size, old.OutputID, old.Size)
			panic(msg)
		}
	}
	file := c.fileName(id, "a")
	if err := ioutil.WriteFile(file, entry, 0666); err != nil {
		// TODO(bcmills): This Remove potentially races with another go command writing to file.
		// Can we eliminate it?
		os.Remove(file)
		return err
	}
	// Bump the mtime so Trim sees the entry as recently used.
	os.Chtimes(file, c.now(), c.now()) // mainly for tests

	return nil
}
|  | ||||
| // Put stores the given output in the cache as the output for the action ID. | ||||
| // It may read file twice. The content of file must not change between the two passes. | ||||
| func (c *Cache) Put(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { | ||||
| 	return c.put(id, file, true) | ||||
| } | ||||
|  | ||||
| // PutNoVerify is like Put but disables the verify check | ||||
| // when GODEBUG=goverifycache=1 is set. | ||||
| // It is meant for data that is OK to cache but that we expect to vary slightly from run to run, | ||||
| // like test output containing times and the like. | ||||
| func (c *Cache) PutNoVerify(id ActionID, file io.ReadSeeker) (OutputID, int64, error) { | ||||
| 	return c.put(id, file, false) | ||||
| } | ||||
|  | ||||
| func (c *Cache) put(id ActionID, file io.ReadSeeker, allowVerify bool) (OutputID, int64, error) { | ||||
| 	// Compute output ID. | ||||
| 	h := sha256.New() | ||||
| 	if _, err := file.Seek(0, 0); err != nil { | ||||
| 		return OutputID{}, 0, err | ||||
| 	} | ||||
| 	size, err := io.Copy(h, file) | ||||
| 	if err != nil { | ||||
| 		return OutputID{}, 0, err | ||||
| 	} | ||||
| 	var out OutputID | ||||
| 	h.Sum(out[:0]) | ||||
|  | ||||
| 	// Copy to cached output file (if not already present). | ||||
| 	if err := c.copyFile(file, out, size); err != nil { | ||||
| 		return out, size, err | ||||
| 	} | ||||
|  | ||||
| 	// Add to cache index. | ||||
| 	return out, size, c.putIndexEntry(id, out, size, allowVerify) | ||||
| } | ||||
|  | ||||
| // PutBytes stores the given bytes in the cache as the output for the action ID. | ||||
| func (c *Cache) PutBytes(id ActionID, data []byte) error { | ||||
| 	_, _, err := c.Put(id, bytes.NewReader(data)) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// copyFile copies file into the cache, expecting it to have the given
// output ID and size, if that file is not present already.
// The file is committed by writing its last byte only after everything
// before it has been written and re-verified against out, so concurrent
// readers never observe a full-size file with wrong contents.
func (c *Cache) copyFile(file io.ReadSeeker, out OutputID, size int64) error {
	name := c.fileName(out, "d")
	info, err := os.Stat(name)
	if err == nil && info.Size() == size {
		// A file of the right size already exists. Check hash.
		if f, err := os.Open(name); err == nil {
			h := sha256.New()
			io.Copy(h, f)
			f.Close()
			var out2 OutputID
			h.Sum(out2[:0])
			if out == out2 {
				// Identical content already cached; nothing to do.
				return nil
			}
		}
		// Hash did not match. Fall through and rewrite file.
	}

	// Copy file to cache directory.
	mode := os.O_RDWR | os.O_CREATE
	if err == nil && info.Size() > size { // shouldn't happen but fix in case
		mode |= os.O_TRUNC
	}
	f, err := os.OpenFile(name, mode, 0666)
	if err != nil {
		return err
	}
	defer f.Close()
	if size == 0 {
		// File now exists with correct size.
		// Only one possible zero-length file, so contents are OK too.
		// Early return here makes sure there's a "last byte" for code below.
		return nil
	}

	// From here on, if any of the I/O writing the file fails,
	// we make a best-effort attempt to truncate the file f
	// before returning, to avoid leaving bad bytes in the file.

	// Copy file to f, but also into h to double-check hash.
	if _, err := file.Seek(0, 0); err != nil {
		f.Truncate(0)
		return err
	}
	h := sha256.New()
	w := io.MultiWriter(f, h)
	// Write all but the final byte; the file stays "one byte short" until
	// the content is re-verified below.
	if _, err := io.CopyN(w, file, size-1); err != nil {
		f.Truncate(0)
		return err
	}
	// Check last byte before writing it; writing it will make the size match
	// what other processes expect to find and might cause them to start
	// using the file.
	buf := make([]byte, 1)
	if _, err := file.Read(buf); err != nil {
		f.Truncate(0)
		return err
	}
	h.Write(buf)
	sum := h.Sum(nil)
	if !bytes.Equal(sum, out[:]) {
		// The data read in this pass differs from the hash computed earlier:
		// the caller violated Put's "content must not change" contract.
		f.Truncate(0)
		return fmt.Errorf("file content changed underfoot")
	}

	// Commit cache file entry by writing the final byte.
	if _, err := f.Write(buf); err != nil {
		f.Truncate(0)
		return err
	}
	if err := f.Close(); err != nil {
		// Data might not have been written,
		// but file may look like it is the right size.
		// To be extra careful, remove cached file.
		os.Remove(name)
		return err
	}
	os.Chtimes(name, c.now(), c.now()) // mainly for tests

	return nil
}
							
								
								
									
										85
									
								
								vendor/honnef.co/go/tools/internal/cache/default.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										85
									
								
								vendor/honnef.co/go/tools/internal/cache/default.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,85 @@ | ||||
| // Copyright 2017 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package cache | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
// Default returns the default cache to use.
// Initialization runs at most once; every call returns the same *Cache
// together with any directory-resolution error recorded by DefaultDir.
func Default() (*Cache, error) {
	defaultOnce.Do(initDefaultCache)
	return defaultCache, defaultDirErr
}

var (
	defaultOnce  sync.Once // guards the one-time call to initDefaultCache
	defaultCache *Cache    // set by initDefaultCache
)
|  | ||||
// cacheREADME is a message stored in a README in the cache directory.
// Because the cache lives outside the normal Go trees, we leave the
// README as a courtesy to explain where it came from.
const cacheREADME = `This directory holds cached build artifacts from staticcheck.
`
|  | ||||
| // initDefaultCache does the work of finding the default cache | ||||
| // the first time Default is called. | ||||
| func initDefaultCache() { | ||||
| 	dir := DefaultDir() | ||||
| 	if err := os.MkdirAll(dir, 0777); err != nil { | ||||
| 		log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) | ||||
| 	} | ||||
| 	if _, err := os.Stat(filepath.Join(dir, "README")); err != nil { | ||||
| 		// Best effort. | ||||
| 		ioutil.WriteFile(filepath.Join(dir, "README"), []byte(cacheREADME), 0666) | ||||
| 	} | ||||
|  | ||||
| 	c, err := Open(dir) | ||||
| 	if err != nil { | ||||
| 		log.Fatalf("failed to initialize build cache at %s: %s\n", dir, err) | ||||
| 	} | ||||
| 	defaultCache = c | ||||
| } | ||||
|  | ||||
var (
	defaultDirOnce sync.Once
	defaultDir     string
	defaultDirErr  error
)

// DefaultDir returns the effective STATICCHECK_CACHE setting.
func DefaultDir() string {
	// The first result is memoized (along with any resolution error, kept
	// in defaultDirErr for Default to report) so that later calls and
	// initDefaultCache observe a single consistent answer.
	defaultDirOnce.Do(func() {
		dir := os.Getenv("STATICCHECK_CACHE")
		switch {
		case filepath.IsAbs(dir):
			// Explicit absolute path: use it as-is.
		case dir != "":
			// A relative setting is rejected; the raw value is still
			// returned so callers can show it in error messages.
			defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not an absolute path")
		default:
			// Unset: place the cache under the OS user cache directory.
			base, err := os.UserCacheDir()
			if err != nil {
				defaultDirErr = fmt.Errorf("STATICCHECK_CACHE is not defined and %v", err)
				return
			}
			dir = filepath.Join(base, "staticcheck")
		}
		defaultDir = dir
	})

	return defaultDir
}
							
								
								
									
										176
									
								
								vendor/honnef.co/go/tools/internal/cache/hash.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										176
									
								
								vendor/honnef.co/go/tools/internal/cache/hash.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,176 @@ | ||||
| // Copyright 2017 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| package cache | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"crypto/sha256" | ||||
| 	"fmt" | ||||
| 	"hash" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
// debugHash enables tracing of every hash operation to stderr.
var debugHash = false // set when GODEBUG=gocachehash=1

// HashSize is the number of bytes in a hash.
const HashSize = 32

// A Hash provides access to the canonical hash function used to index the cache.
// The current implementation uses salted SHA256, but clients must not assume this.
type Hash struct {
	h    hash.Hash
	name string        // for debugging
	buf  *bytes.Buffer // for verify
}

// hashSalt is a salt string added to the beginning of every hash
// created by NewHash. Using the Staticcheck version makes sure that different
// versions of the command do not address the same cache
// entries, so that a bug in one version does not affect the execution
// of other versions. This salt will result in additional ActionID files
// in the cache, but not additional copies of the large output files,
// which are still addressed by unsalted SHA256.
var hashSalt []byte

// SetSalt sets the salt mixed into every hash created by NewHash.
// It should be called before any hashes are computed.
func SetSalt(b []byte) {
	hashSalt = b
}
|  | ||||
| // Subkey returns an action ID corresponding to mixing a parent | ||||
| // action ID with a string description of the subkey. | ||||
| func Subkey(parent ActionID, desc string) ActionID { | ||||
| 	h := sha256.New() | ||||
| 	h.Write([]byte("subkey:")) | ||||
| 	h.Write(parent[:]) | ||||
| 	h.Write([]byte(desc)) | ||||
| 	var out ActionID | ||||
| 	h.Sum(out[:0]) | ||||
| 	if debugHash { | ||||
| 		fmt.Fprintf(os.Stderr, "HASH subkey %x %q = %x\n", parent, desc, out) | ||||
| 	} | ||||
| 	if verify { | ||||
| 		hashDebug.Lock() | ||||
| 		hashDebug.m[out] = fmt.Sprintf("subkey %x %q", parent, desc) | ||||
| 		hashDebug.Unlock() | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| // NewHash returns a new Hash. | ||||
| // The caller is expected to Write data to it and then call Sum. | ||||
| func NewHash(name string) *Hash { | ||||
| 	h := &Hash{h: sha256.New(), name: name} | ||||
| 	if debugHash { | ||||
| 		fmt.Fprintf(os.Stderr, "HASH[%s]\n", h.name) | ||||
| 	} | ||||
| 	h.Write(hashSalt) | ||||
| 	if verify { | ||||
| 		h.buf = new(bytes.Buffer) | ||||
| 	} | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| // Write writes data to the running hash. | ||||
| func (h *Hash) Write(b []byte) (int, error) { | ||||
| 	if debugHash { | ||||
| 		fmt.Fprintf(os.Stderr, "HASH[%s]: %q\n", h.name, b) | ||||
| 	} | ||||
| 	if h.buf != nil { | ||||
| 		h.buf.Write(b) | ||||
| 	} | ||||
| 	return h.h.Write(b) | ||||
| } | ||||
|  | ||||
| // Sum returns the hash of the data written previously. | ||||
| func (h *Hash) Sum() [HashSize]byte { | ||||
| 	var out [HashSize]byte | ||||
| 	h.h.Sum(out[:0]) | ||||
| 	if debugHash { | ||||
| 		fmt.Fprintf(os.Stderr, "HASH[%s]: %x\n", h.name, out) | ||||
| 	} | ||||
| 	if h.buf != nil { | ||||
| 		hashDebug.Lock() | ||||
| 		if hashDebug.m == nil { | ||||
| 			hashDebug.m = make(map[[HashSize]byte]string) | ||||
| 		} | ||||
| 		hashDebug.m[out] = h.buf.String() | ||||
| 		hashDebug.Unlock() | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
|  | ||||
// In GODEBUG=gocacheverify=1 mode,
// hashDebug holds the input to every computed hash ID,
// so that we can work backward from the ID involved in a
// cache entry mismatch to a description of what should be there.
var hashDebug struct {
	sync.Mutex
	m map[[HashSize]byte]string
}

// reverseHash returns the input used to compute the hash id.
// It returns the empty string if the id was not recorded
// (e.g. when verify mode is off).
func reverseHash(id [HashSize]byte) string {
	hashDebug.Lock()
	s := hashDebug.m[id]
	hashDebug.Unlock()
	return s
}

// hashFileCache memoizes FileHash results per file path for this process.
var hashFileCache struct {
	sync.Mutex
	m map[string][HashSize]byte
}
|  | ||||
| // FileHash returns the hash of the named file. | ||||
| // It caches repeated lookups for a given file, | ||||
| // and the cache entry for a file can be initialized | ||||
| // using SetFileHash. | ||||
| // The hash used by FileHash is not the same as | ||||
| // the hash used by NewHash. | ||||
| func FileHash(file string) ([HashSize]byte, error) { | ||||
| 	hashFileCache.Lock() | ||||
| 	out, ok := hashFileCache.m[file] | ||||
| 	hashFileCache.Unlock() | ||||
|  | ||||
| 	if ok { | ||||
| 		return out, nil | ||||
| 	} | ||||
|  | ||||
| 	h := sha256.New() | ||||
| 	f, err := os.Open(file) | ||||
| 	if err != nil { | ||||
| 		if debugHash { | ||||
| 			fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) | ||||
| 		} | ||||
| 		return [HashSize]byte{}, err | ||||
| 	} | ||||
| 	_, err = io.Copy(h, f) | ||||
| 	f.Close() | ||||
| 	if err != nil { | ||||
| 		if debugHash { | ||||
| 			fmt.Fprintf(os.Stderr, "HASH %s: %v\n", file, err) | ||||
| 		} | ||||
| 		return [HashSize]byte{}, err | ||||
| 	} | ||||
| 	h.Sum(out[:0]) | ||||
| 	if debugHash { | ||||
| 		fmt.Fprintf(os.Stderr, "HASH %s: %x\n", file, out) | ||||
| 	} | ||||
|  | ||||
| 	SetFileHash(file, out) | ||||
| 	return out, nil | ||||
| } | ||||
|  | ||||
| // SetFileHash sets the hash returned by FileHash for file. | ||||
| func SetFileHash(file string, sum [HashSize]byte) { | ||||
| 	hashFileCache.Lock() | ||||
| 	if hashFileCache.m == nil { | ||||
| 		hashFileCache.m = make(map[string][HashSize]byte) | ||||
| 	} | ||||
| 	hashFileCache.m[file] = sum | ||||
| 	hashFileCache.Unlock() | ||||
| } | ||||
							
								
								
									
										27
									
								
								vendor/honnef.co/go/tools/internal/passes/buildssa/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/honnef.co/go/tools/internal/passes/buildssa/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
# Bazel targets for the vendored honnef.co/go/tools/internal/passes/buildssa
# package. NOTE(review): tags = ["automanaged"] suggests the filegroups are
# tool-maintained — confirm before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["buildssa.go"],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/internal/passes/buildssa",
    importpath = "honnef.co/go/tools/internal/passes/buildssa",
    visibility = ["//vendor/honnef.co/go/tools:__subpackages__"],
    deps = [
        "//vendor/golang.org/x/tools/go/analysis:go_default_library",
        "//vendor/honnef.co/go/tools/ssa:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										116
									
								
								vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										116
									
								
								vendor/honnef.co/go/tools/internal/passes/buildssa/buildssa.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,116 @@ | ||||
| // Copyright 2018 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package buildssa defines an Analyzer that constructs the SSA | ||||
| // representation of an error-free package and returns the set of all | ||||
| // functions within it. It does not report any diagnostics itself but | ||||
| // may be used as an input to other analyzers. | ||||
| // | ||||
| // THIS INTERFACE IS EXPERIMENTAL AND MAY BE SUBJECT TO INCOMPATIBLE CHANGE. | ||||
| package buildssa | ||||
|  | ||||
| import ( | ||||
| 	"go/ast" | ||||
| 	"go/types" | ||||
| 	"reflect" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| 	"honnef.co/go/tools/ssa" | ||||
| ) | ||||
|  | ||||
// Analyzer builds the SSA-form IR of an error-free package. It reports no
// diagnostics itself; other analyzers consume its *SSA result via ResultType.
var Analyzer = &analysis.Analyzer{
	Name:       "buildssa",
	Doc:        "build SSA-form IR for later passes",
	Run:        run,
	ResultType: reflect.TypeOf(new(SSA)),
}

// SSA provides SSA-form intermediate representation for all the
// non-blank source functions in the current package.
type SSA struct {
	Pkg      *ssa.Package    // the SSA package built for the package under analysis
	SrcFuncs []*ssa.Function // source-order functions, including anonymous ones
}
|  | ||||
// run builds a fresh SSA Program for pass.Pkg, creates (unbuilt) SSA
// packages for all transitive imports, builds the primary package, and
// collects its source functions (including anonymous ones) in source order.
func run(pass *analysis.Pass) (interface{}, error) {
	// Plundered from ssautil.BuildPackage.

	// We must create a new Program for each Package because the
	// analysis API provides no place to hang a Program shared by
	// all Packages. Consequently, SSA Packages and Functions do not
	// have a canonical representation across an analysis session of
	// multiple packages. This is unlikely to be a problem in
	// practice because the analysis API essentially forces all
	// packages to be analysed independently, so any given call to
	// Analysis.Run on a package will see only SSA objects belonging
	// to a single Program.

	mode := ssa.GlobalDebug

	prog := ssa.NewProgram(pass.Fset, mode)

	// Create SSA packages for all imports.
	// Order is not significant.
	created := make(map[*types.Package]bool)
	var createAll func(pkgs []*types.Package)
	createAll = func(pkgs []*types.Package) {
		for _, p := range pkgs {
			if !created[p] {
				created[p] = true
				prog.CreatePackage(p, nil, nil, true)
				createAll(p.Imports())
			}
		}
	}
	createAll(pass.Pkg.Imports())

	// Create and build the primary package.
	ssapkg := prog.CreatePackage(pass.Pkg, pass.Files, pass.TypesInfo, false)
	ssapkg.Build()

	// Compute list of source functions, including literals,
	// in source order.
	var funcs []*ssa.Function
	var addAnons func(f *ssa.Function)
	addAnons = func(f *ssa.Function) {
		funcs = append(funcs, f)
		for _, anon := range f.AnonFuncs {
			addAnons(anon)
		}
	}
	addAnons(ssapkg.Members["init"].(*ssa.Function))
	for _, f := range pass.Files {
		for _, decl := range f.Decls {
			if fdecl, ok := decl.(*ast.FuncDecl); ok {

				// SSA will not build a Function
				// for a FuncDecl named blank.
				// That's arguably too strict but
				// relaxing it would break uniqueness of
				// names of package members.
				if fdecl.Name.Name == "_" {
					continue
				}

				// (init functions have distinct Func
				// objects named "init" and distinct
				// ssa.Functions named "init#1", ...)

				// NOTE(review): if Defs[fdecl.Name] is nil, this type
				// assertion panics before the nil check below ever runs —
				// the check appears unreachable; confirm against upstream.
				fn := pass.TypesInfo.Defs[fdecl.Name].(*types.Func)
				if fn == nil {
					panic(fn)
				}

				f := ssapkg.Prog.FuncValue(fn)
				if f == nil {
					panic(fn)
				}

				addAnons(f)
			}
		}
	}

	return &SSA{Pkg: ssapkg, SrcFuncs: funcs}, nil
}
							
								
								
									
										23
									
								
								vendor/honnef.co/go/tools/internal/renameio/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										23
									
								
								vendor/honnef.co/go/tools/internal/renameio/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,23 @@ | ||||
# Bazel targets for the vendored honnef.co/go/tools/internal/renameio
# package. NOTE(review): tags = ["automanaged"] suggests the filegroups are
# tool-maintained — confirm before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["renameio.go"],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/internal/renameio",
    importpath = "honnef.co/go/tools/internal/renameio",
    visibility = ["//vendor/honnef.co/go/tools:__subpackages__"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										83
									
								
								vendor/honnef.co/go/tools/internal/renameio/renameio.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										83
									
								
								vendor/honnef.co/go/tools/internal/renameio/renameio.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,83 @@ | ||||
| // Copyright 2018 The Go Authors. All rights reserved. | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file. | ||||
|  | ||||
| // Package renameio writes files atomically by renaming temporary files. | ||||
| package renameio | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"io" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"runtime" | ||||
| 	"strings" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
// patternSuffix is appended to a target filename to form both the
// TempFile pattern and the matching glob.
const patternSuffix = "*.tmp"

// Pattern returns a glob pattern that matches the unrenamed temporary files
// created when writing to filename.
func Pattern(filename string) string {
	dir, base := filepath.Dir(filename), filepath.Base(filename)
	return filepath.Join(dir, base+patternSuffix)
}
|  | ||||
| // WriteFile is like ioutil.WriteFile, but first writes data to an arbitrary | ||||
| // file in the same directory as filename, then renames it atomically to the | ||||
| // final name. | ||||
| // | ||||
| // That ensures that the final location, if it exists, is always a complete file. | ||||
| func WriteFile(filename string, data []byte) (err error) { | ||||
| 	return WriteToFile(filename, bytes.NewReader(data)) | ||||
| } | ||||
|  | ||||
// WriteToFile is a variant of WriteFile that accepts the data as an io.Reader
// instead of a slice. The named return err lets the deferred cleanup below
// decide whether the temporary file must be removed.
func WriteToFile(filename string, data io.Reader) (err error) {
	f, err := ioutil.TempFile(filepath.Dir(filename), filepath.Base(filename)+patternSuffix)
	if err != nil {
		return err
	}
	defer func() {
		// Only call os.Remove on f.Name() if we failed to rename it: otherwise,
		// some other process may have created a new file with the same name after
		// that.
		if err != nil {
			f.Close()
			os.Remove(f.Name())
		}
	}()

	if _, err := io.Copy(f, data); err != nil {
		return err
	}
	// Sync the file before renaming it: otherwise, after a crash the reader may
	// observe a 0-length file instead of the actual contents.
	// See https://golang.org/issue/22397#issuecomment-380831736.
	if err := f.Sync(); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}

	// Retry the rename for a bounded time on Windows-specific spurious
	// "Access is denied" failures; on any other error (or any other OS)
	// return immediately.
	var start time.Time
	for {
		err := os.Rename(f.Name(), filename)
		if err == nil || runtime.GOOS != "windows" || !strings.HasSuffix(err.Error(), "Access is denied.") {
			return err
		}

		// Windows seems to occasionally trigger spurious "Access is denied" errors
		// here (see golang.org/issue/31247). We're not sure why. It's probably
		// worth a little extra latency to avoid propagating the spurious errors.
		if start.IsZero() {
			start = time.Now()
		} else if time.Since(start) >= 500*time.Millisecond {
			return err
		}
		time.Sleep(5 * time.Millisecond)
	}
}
							
								
								
									
										29
									
								
								vendor/honnef.co/go/tools/internal/sharedcheck/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								vendor/honnef.co/go/tools/internal/sharedcheck/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
# Bazel targets for the vendored honnef.co/go/tools/internal/sharedcheck
# package. NOTE(review): tags = ["automanaged"] suggests the filegroups are
# tool-maintained — confirm before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["lint.go"],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/internal/sharedcheck",
    importpath = "honnef.co/go/tools/internal/sharedcheck",
    visibility = ["//vendor/honnef.co/go/tools:__subpackages__"],
    deps = [
        "//vendor/golang.org/x/tools/go/analysis:go_default_library",
        "//vendor/honnef.co/go/tools/internal/passes/buildssa:go_default_library",
        "//vendor/honnef.co/go/tools/lint/lintdsl:go_default_library",
        "//vendor/honnef.co/go/tools/ssa:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										70
									
								
								vendor/honnef.co/go/tools/internal/sharedcheck/lint.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										70
									
								
								vendor/honnef.co/go/tools/internal/sharedcheck/lint.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,70 @@ | ||||
package sharedcheck

import (
	"go/ast"
	"go/types"

	"golang.org/x/tools/go/analysis"
	"honnef.co/go/tools/internal/passes/buildssa"
	. "honnef.co/go/tools/lint/lintdsl"
	"honnef.co/go/tools/ssa"
)

// CheckRangeStringRunes reports `for _, r := range []rune(s)` loops where the
// conversion to []rune is unnecessary: ranging over the string directly
// yields the same runes. It inspects each source function's AST and uses the
// SSA form (from the buildssa analyzer) to confirm the conversion result is
// used only by the range itself.
func CheckRangeStringRunes(pass *analysis.Pass) (interface{}, error) {
	for _, ssafn := range pass.ResultOf[buildssa.Analyzer].(*buildssa.SSA).SrcFuncs {
		fn := func(node ast.Node) bool {
			// Only range statements whose key is blank are candidates.
			rng, ok := node.(*ast.RangeStmt)
			if !ok || !IsBlank(rng.Key) {
				return true
			}

			v, _ := ssafn.ValueForExpr(rng.X)

			// Check that we're converting from string to []rune
			val, _ := v.(*ssa.Convert)
			if val == nil {
				return true
			}
			Tsrc, ok := val.X.Type().(*types.Basic)
			if !ok || Tsrc.Kind() != types.String {
				return true
			}
			Tdst, ok := val.Type().(*types.Slice)
			if !ok {
				return true
			}
			TdstElem, ok := Tdst.Elem().(*types.Basic)
			if !ok || TdstElem.Kind() != types.Int32 {
				return true
			}

			// Check that the result of the conversion is only used to
			// range over
			refs := val.Referrers()
			if refs == nil {
				return true
			}

			// Expect two refs: one for obtaining the length of the slice,
			// one for accessing the elements
			if len(FilterDebug(*refs)) != 2 {
				// TODO(dh): right now, we check that only one place
				// refers to our slice. This will miss cases such as
				// ranging over the slice twice. Ideally, we'd ensure that
				// the slice is only used for ranging over (without
				// accessing the key), but that is harder to do because in
				// SSA form, ranging over a slice looks like an ordinary
				// loop with index increments and slice accesses. We'd
				// have to look at the associated AST node to check that
				// it's a range statement.
				return true
			}

			pass.Reportf(rng.Pos(), "should range over string, not []rune(string)")

			return true
		}
		Inspect(ssafn.Syntax(), fn)
	}
	return nil, nil
}
							
								
								
									
										40
									
								
								vendor/honnef.co/go/tools/lint/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										40
									
								
								vendor/honnef.co/go/tools/lint/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,40 @@ | ||||
| load("@io_bazel_rules_go//go:def.bzl", "go_library") | ||||
|  | ||||
| go_library( | ||||
|     name = "go_default_library", | ||||
|     srcs = [ | ||||
|         "lint.go", | ||||
|         "runner.go", | ||||
|         "stats.go", | ||||
|     ], | ||||
|     importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/lint", | ||||
|     importpath = "honnef.co/go/tools/lint", | ||||
|     visibility = ["//visibility:public"], | ||||
|     deps = [ | ||||
|         "//vendor/golang.org/x/tools/go/analysis:go_default_library", | ||||
|         "//vendor/golang.org/x/tools/go/packages:go_default_library", | ||||
|         "//vendor/golang.org/x/tools/go/types/objectpath:go_default_library", | ||||
|         "//vendor/honnef.co/go/tools/config:go_default_library", | ||||
|         "//vendor/honnef.co/go/tools/facts:go_default_library", | ||||
|         "//vendor/honnef.co/go/tools/internal/cache:go_default_library", | ||||
|         "//vendor/honnef.co/go/tools/loader:go_default_library", | ||||
|     ], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "package-srcs", | ||||
|     srcs = glob(["**"]), | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:private"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "all-srcs", | ||||
|     srcs = [ | ||||
|         ":package-srcs", | ||||
|         "//vendor/honnef.co/go/tools/lint/lintdsl:all-srcs", | ||||
|         "//vendor/honnef.co/go/tools/lint/lintutil:all-srcs", | ||||
|     ], | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:public"], | ||||
| ) | ||||
							
								
								
									
										28
									
								
								vendor/honnef.co/go/tools/lint/LICENSE
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										28
									
								
								vendor/honnef.co/go/tools/lint/LICENSE
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,28 @@ | ||||
| Copyright (c) 2013 The Go Authors. All rights reserved. | ||||
| Copyright (c) 2016 Dominik Honnef. All rights reserved. | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without | ||||
| modification, are permitted provided that the following conditions are | ||||
| met: | ||||
|  | ||||
|    * Redistributions of source code must retain the above copyright | ||||
| notice, this list of conditions and the following disclaimer. | ||||
|    * Redistributions in binary form must reproduce the above | ||||
| copyright notice, this list of conditions and the following disclaimer | ||||
| in the documentation and/or other materials provided with the | ||||
| distribution. | ||||
|    * Neither the name of Google Inc. nor the names of its | ||||
| contributors may be used to endorse or promote products derived from | ||||
| this software without specific prior written permission. | ||||
|  | ||||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
							
								
								
									
										491
									
								
								vendor/honnef.co/go/tools/lint/lint.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										491
									
								
								vendor/honnef.co/go/tools/lint/lint.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,491 @@ | ||||
| // Package lint provides the foundation for tools like staticcheck | ||||
| package lint // import "honnef.co/go/tools/lint" | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"go/scanner" | ||||
| 	"go/token" | ||||
| 	"go/types" | ||||
| 	"path/filepath" | ||||
| 	"sort" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"unicode" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| 	"golang.org/x/tools/go/packages" | ||||
| 	"honnef.co/go/tools/config" | ||||
| ) | ||||
|  | ||||
| type Documentation struct { | ||||
| 	Title      string | ||||
| 	Text       string | ||||
| 	Since      string | ||||
| 	NonDefault bool | ||||
| 	Options    []string | ||||
| } | ||||
|  | ||||
| func (doc *Documentation) String() string { | ||||
| 	b := &strings.Builder{} | ||||
| 	fmt.Fprintf(b, "%s\n\n", doc.Title) | ||||
| 	if doc.Text != "" { | ||||
| 		fmt.Fprintf(b, "%s\n\n", doc.Text) | ||||
| 	} | ||||
| 	fmt.Fprint(b, "Available since\n    ") | ||||
| 	if doc.Since == "" { | ||||
| 		fmt.Fprint(b, "unreleased") | ||||
| 	} else { | ||||
| 		fmt.Fprintf(b, "%s", doc.Since) | ||||
| 	} | ||||
| 	if doc.NonDefault { | ||||
| 		fmt.Fprint(b, ", non-default") | ||||
| 	} | ||||
| 	fmt.Fprint(b, "\n") | ||||
| 	if len(doc.Options) > 0 { | ||||
| 		fmt.Fprintf(b, "\nOptions\n") | ||||
| 		for _, opt := range doc.Options { | ||||
| 			fmt.Fprintf(b, "    %s", opt) | ||||
| 		} | ||||
| 		fmt.Fprint(b, "\n") | ||||
| 	} | ||||
| 	return b.String() | ||||
| } | ||||
|  | ||||
| type Ignore interface { | ||||
| 	Match(p Problem) bool | ||||
| } | ||||
|  | ||||
| type LineIgnore struct { | ||||
| 	File    string | ||||
| 	Line    int | ||||
| 	Checks  []string | ||||
| 	Matched bool | ||||
| 	Pos     token.Pos | ||||
| } | ||||
|  | ||||
| func (li *LineIgnore) Match(p Problem) bool { | ||||
| 	pos := p.Pos | ||||
| 	if pos.Filename != li.File || pos.Line != li.Line { | ||||
| 		return false | ||||
| 	} | ||||
| 	for _, c := range li.Checks { | ||||
| 		if m, _ := filepath.Match(c, p.Check); m { | ||||
| 			li.Matched = true | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (li *LineIgnore) String() string { | ||||
| 	matched := "not matched" | ||||
| 	if li.Matched { | ||||
| 		matched = "matched" | ||||
| 	} | ||||
| 	return fmt.Sprintf("%s:%d %s (%s)", li.File, li.Line, strings.Join(li.Checks, ", "), matched) | ||||
| } | ||||
|  | ||||
| type FileIgnore struct { | ||||
| 	File   string | ||||
| 	Checks []string | ||||
| } | ||||
|  | ||||
| func (fi *FileIgnore) Match(p Problem) bool { | ||||
| 	if p.Pos.Filename != fi.File { | ||||
| 		return false | ||||
| 	} | ||||
| 	for _, c := range fi.Checks { | ||||
| 		if m, _ := filepath.Match(c, p.Check); m { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| type Severity uint8 | ||||
|  | ||||
| const ( | ||||
| 	Error Severity = iota | ||||
| 	Warning | ||||
| 	Ignored | ||||
| ) | ||||
|  | ||||
| // Problem represents a problem in some source code. | ||||
| type Problem struct { | ||||
| 	Pos      token.Position | ||||
| 	End      token.Position | ||||
| 	Message  string | ||||
| 	Check    string | ||||
| 	Severity Severity | ||||
| } | ||||
|  | ||||
| func (p *Problem) String() string { | ||||
| 	return fmt.Sprintf("%s (%s)", p.Message, p.Check) | ||||
| } | ||||
|  | ||||
| // A Linter lints Go source code. | ||||
| type Linter struct { | ||||
| 	Checkers           []*analysis.Analyzer | ||||
| 	CumulativeCheckers []CumulativeChecker | ||||
| 	GoVersion          int | ||||
| 	Config             config.Config | ||||
| 	Stats              Stats | ||||
| } | ||||
|  | ||||
| type CumulativeChecker interface { | ||||
| 	Analyzer() *analysis.Analyzer | ||||
| 	Result() []types.Object | ||||
| 	ProblemObject(*token.FileSet, types.Object) Problem | ||||
| } | ||||
|  | ||||
| func (l *Linter) Lint(cfg *packages.Config, patterns []string) ([]Problem, error) { | ||||
| 	var allAnalyzers []*analysis.Analyzer | ||||
| 	allAnalyzers = append(allAnalyzers, l.Checkers...) | ||||
| 	for _, cum := range l.CumulativeCheckers { | ||||
| 		allAnalyzers = append(allAnalyzers, cum.Analyzer()) | ||||
| 	} | ||||
|  | ||||
| 	// The -checks command line flag overrules all configuration | ||||
| 	// files, which means that for `-checks="foo"`, no check other | ||||
| 	// than foo can ever be reported to the user. Make use of this | ||||
| 	// fact to cull the list of analyses we need to run. | ||||
|  | ||||
| 	// replace "inherit" with "all", as we don't want to base the | ||||
| 	// list of all checks on the default configuration, which | ||||
| 	// disables certain checks. | ||||
| 	checks := make([]string, len(l.Config.Checks)) | ||||
| 	copy(checks, l.Config.Checks) | ||||
| 	for i, c := range checks { | ||||
| 		if c == "inherit" { | ||||
| 			checks[i] = "all" | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	allowed := FilterChecks(allAnalyzers, checks) | ||||
| 	var allowedAnalyzers []*analysis.Analyzer | ||||
| 	for _, c := range l.Checkers { | ||||
| 		if allowed[c.Name] { | ||||
| 			allowedAnalyzers = append(allowedAnalyzers, c) | ||||
| 		} | ||||
| 	} | ||||
| 	hasCumulative := false | ||||
| 	for _, cum := range l.CumulativeCheckers { | ||||
| 		a := cum.Analyzer() | ||||
| 		if allowed[a.Name] { | ||||
| 			hasCumulative = true | ||||
| 			allowedAnalyzers = append(allowedAnalyzers, a) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	r, err := NewRunner(&l.Stats) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	r.goVersion = l.GoVersion | ||||
|  | ||||
| 	pkgs, err := r.Run(cfg, patterns, allowedAnalyzers, hasCumulative) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	tpkgToPkg := map[*types.Package]*Package{} | ||||
| 	for _, pkg := range pkgs { | ||||
| 		tpkgToPkg[pkg.Types] = pkg | ||||
|  | ||||
| 		for _, e := range pkg.errs { | ||||
| 			switch e := e.(type) { | ||||
| 			case types.Error: | ||||
| 				p := Problem{ | ||||
| 					Pos:      e.Fset.PositionFor(e.Pos, false), | ||||
| 					Message:  e.Msg, | ||||
| 					Severity: Error, | ||||
| 					Check:    "compile", | ||||
| 				} | ||||
| 				pkg.problems = append(pkg.problems, p) | ||||
| 			case packages.Error: | ||||
| 				msg := e.Msg | ||||
| 				if len(msg) != 0 && msg[0] == '\n' { | ||||
| 					// TODO(dh): See https://github.com/golang/go/issues/32363 | ||||
| 					msg = msg[1:] | ||||
| 				} | ||||
|  | ||||
| 				var pos token.Position | ||||
| 				if e.Pos == "" { | ||||
| 					// Under certain conditions (malformed package | ||||
| 					// declarations, multiple packages in the same | ||||
| 					// directory), go list emits an error on stderr | ||||
| 					// instead of JSON. Those errors do not have | ||||
| 					// associated position information in | ||||
| 					// go/packages.Error, even though the output on | ||||
| 					// stderr may contain it. | ||||
| 					if p, n, err := parsePos(msg); err == nil { | ||||
| 						if abs, err := filepath.Abs(p.Filename); err == nil { | ||||
| 							p.Filename = abs | ||||
| 						} | ||||
| 						pos = p | ||||
| 						msg = msg[n+2:] | ||||
| 					} | ||||
| 				} else { | ||||
| 					var err error | ||||
| 					pos, _, err = parsePos(e.Pos) | ||||
| 					if err != nil { | ||||
| 						panic(fmt.Sprintf("internal error: %s", e)) | ||||
| 					} | ||||
| 				} | ||||
| 				p := Problem{ | ||||
| 					Pos:      pos, | ||||
| 					Message:  msg, | ||||
| 					Severity: Error, | ||||
| 					Check:    "compile", | ||||
| 				} | ||||
| 				pkg.problems = append(pkg.problems, p) | ||||
| 			case scanner.ErrorList: | ||||
| 				for _, e := range e { | ||||
| 					p := Problem{ | ||||
| 						Pos:      e.Pos, | ||||
| 						Message:  e.Msg, | ||||
| 						Severity: Error, | ||||
| 						Check:    "compile", | ||||
| 					} | ||||
| 					pkg.problems = append(pkg.problems, p) | ||||
| 				} | ||||
| 			case error: | ||||
| 				p := Problem{ | ||||
| 					Pos:      token.Position{}, | ||||
| 					Message:  e.Error(), | ||||
| 					Severity: Error, | ||||
| 					Check:    "compile", | ||||
| 				} | ||||
| 				pkg.problems = append(pkg.problems, p) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	atomic.StoreUint32(&r.stats.State, StateCumulative) | ||||
| 	var problems []Problem | ||||
| 	for _, cum := range l.CumulativeCheckers { | ||||
| 		for _, res := range cum.Result() { | ||||
| 			pkg := tpkgToPkg[res.Pkg()] | ||||
| 			allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) | ||||
| 			if allowedChecks[cum.Analyzer().Name] { | ||||
| 				pos := DisplayPosition(pkg.Fset, res.Pos()) | ||||
| 				// FIXME(dh): why are we ignoring generated files | ||||
| 				// here? Surely this is specific to 'unused', not all | ||||
| 				// cumulative checkers | ||||
| 				if _, ok := pkg.gen[pos.Filename]; ok { | ||||
| 					continue | ||||
| 				} | ||||
| 				p := cum.ProblemObject(pkg.Fset, res) | ||||
| 				problems = append(problems, p) | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	for _, pkg := range pkgs { | ||||
| 		for _, ig := range pkg.ignores { | ||||
| 			for i := range pkg.problems { | ||||
| 				p := &pkg.problems[i] | ||||
| 				if ig.Match(*p) { | ||||
| 					p.Severity = Ignored | ||||
| 				} | ||||
| 			} | ||||
| 			for i := range problems { | ||||
| 				p := &problems[i] | ||||
| 				if ig.Match(*p) { | ||||
| 					p.Severity = Ignored | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if pkg.cfg == nil { | ||||
| 			// The package failed to load, otherwise we would have a | ||||
| 			// valid config. Pass through all errors. | ||||
| 			problems = append(problems, pkg.problems...) | ||||
| 		} else { | ||||
| 			for _, p := range pkg.problems { | ||||
| 				allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) | ||||
| 				allowedChecks["compile"] = true | ||||
| 				if allowedChecks[p.Check] { | ||||
| 					problems = append(problems, p) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		for _, ig := range pkg.ignores { | ||||
| 			ig, ok := ig.(*LineIgnore) | ||||
| 			if !ok { | ||||
| 				continue | ||||
| 			} | ||||
| 			if ig.Matched { | ||||
| 				continue | ||||
| 			} | ||||
|  | ||||
| 			couldveMatched := false | ||||
| 			allowedChecks := FilterChecks(allowedAnalyzers, pkg.cfg.Merge(l.Config).Checks) | ||||
| 			for _, c := range ig.Checks { | ||||
| 				if !allowedChecks[c] { | ||||
| 					continue | ||||
| 				} | ||||
| 				couldveMatched = true | ||||
| 				break | ||||
| 			} | ||||
|  | ||||
| 			if !couldveMatched { | ||||
| 				// The ignored checks were disabled for the containing package. | ||||
| 				// Don't flag the ignore for not having matched. | ||||
| 				continue | ||||
| 			} | ||||
| 			p := Problem{ | ||||
| 				Pos:     DisplayPosition(pkg.Fset, ig.Pos), | ||||
| 				Message: "this linter directive didn't match anything; should it be removed?", | ||||
| 				Check:   "", | ||||
| 			} | ||||
| 			problems = append(problems, p) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if len(problems) == 0 { | ||||
| 		return nil, nil | ||||
| 	} | ||||
|  | ||||
| 	sort.Slice(problems, func(i, j int) bool { | ||||
| 		pi := problems[i].Pos | ||||
| 		pj := problems[j].Pos | ||||
|  | ||||
| 		if pi.Filename != pj.Filename { | ||||
| 			return pi.Filename < pj.Filename | ||||
| 		} | ||||
| 		if pi.Line != pj.Line { | ||||
| 			return pi.Line < pj.Line | ||||
| 		} | ||||
| 		if pi.Column != pj.Column { | ||||
| 			return pi.Column < pj.Column | ||||
| 		} | ||||
|  | ||||
| 		return problems[i].Message < problems[j].Message | ||||
| 	}) | ||||
|  | ||||
| 	var out []Problem | ||||
| 	out = append(out, problems[0]) | ||||
| 	for i, p := range problems[1:] { | ||||
| 		// We may encounter duplicate problems because one file | ||||
| 		// can be part of many packages. | ||||
| 		if problems[i] != p { | ||||
| 			out = append(out, p) | ||||
| 		} | ||||
| 	} | ||||
| 	return out, nil | ||||
| } | ||||
|  | ||||
| func FilterChecks(allChecks []*analysis.Analyzer, checks []string) map[string]bool { | ||||
| 	// OPT(dh): this entire computation could be cached per package | ||||
| 	allowedChecks := map[string]bool{} | ||||
|  | ||||
| 	for _, check := range checks { | ||||
| 		b := true | ||||
| 		if len(check) > 1 && check[0] == '-' { | ||||
| 			b = false | ||||
| 			check = check[1:] | ||||
| 		} | ||||
| 		if check == "*" || check == "all" { | ||||
| 			// Match all | ||||
| 			for _, c := range allChecks { | ||||
| 				allowedChecks[c.Name] = b | ||||
| 			} | ||||
| 		} else if strings.HasSuffix(check, "*") { | ||||
| 			// Glob | ||||
| 			prefix := check[:len(check)-1] | ||||
| 			isCat := strings.IndexFunc(prefix, func(r rune) bool { return unicode.IsNumber(r) }) == -1 | ||||
|  | ||||
| 			for _, c := range allChecks { | ||||
| 				idx := strings.IndexFunc(c.Name, func(r rune) bool { return unicode.IsNumber(r) }) | ||||
| 				if isCat { | ||||
| 					// Glob is S*, which should match S1000 but not SA1000 | ||||
| 					cat := c.Name[:idx] | ||||
| 					if prefix == cat { | ||||
| 						allowedChecks[c.Name] = b | ||||
| 					} | ||||
| 				} else { | ||||
| 					// Glob is S1* | ||||
| 					if strings.HasPrefix(c.Name, prefix) { | ||||
| 						allowedChecks[c.Name] = b | ||||
| 					} | ||||
| 				} | ||||
| 			} | ||||
| 		} else { | ||||
| 			// Literal check name | ||||
| 			allowedChecks[check] = b | ||||
| 		} | ||||
| 	} | ||||
| 	return allowedChecks | ||||
| } | ||||
|  | ||||
| type Positioner interface { | ||||
| 	Pos() token.Pos | ||||
| } | ||||
|  | ||||
| func DisplayPosition(fset *token.FileSet, p token.Pos) token.Position { | ||||
| 	if p == token.NoPos { | ||||
| 		return token.Position{} | ||||
| 	} | ||||
|  | ||||
| 	// Only use the adjusted position if it points to another Go file. | ||||
| 	// This means we'll point to the original file for cgo files, but | ||||
| 	// we won't point to a YACC grammar file. | ||||
| 	pos := fset.PositionFor(p, false) | ||||
| 	adjPos := fset.PositionFor(p, true) | ||||
|  | ||||
| 	if filepath.Ext(adjPos.Filename) == ".go" { | ||||
| 		return adjPos | ||||
| 	} | ||||
| 	return pos | ||||
| } | ||||
|  | ||||
| var bufferPool = &sync.Pool{ | ||||
| 	New: func() interface{} { | ||||
| 		buf := bytes.NewBuffer(nil) | ||||
| 		buf.Grow(64) | ||||
| 		return buf | ||||
| 	}, | ||||
| } | ||||
|  | ||||
| func FuncName(f *types.Func) string { | ||||
| 	buf := bufferPool.Get().(*bytes.Buffer) | ||||
| 	buf.Reset() | ||||
| 	if f.Type() != nil { | ||||
| 		sig := f.Type().(*types.Signature) | ||||
| 		if recv := sig.Recv(); recv != nil { | ||||
| 			buf.WriteByte('(') | ||||
| 			if _, ok := recv.Type().(*types.Interface); ok { | ||||
| 				// gcimporter creates abstract methods of | ||||
| 				// named interfaces using the interface type | ||||
| 				// (not the named type) as the receiver. | ||||
| 				// Don't print it in full. | ||||
| 				buf.WriteString("interface") | ||||
| 			} else { | ||||
| 				types.WriteType(buf, recv.Type(), nil) | ||||
| 			} | ||||
| 			buf.WriteByte(')') | ||||
| 			buf.WriteByte('.') | ||||
| 		} else if f.Pkg() != nil { | ||||
| 			writePackage(buf, f.Pkg()) | ||||
| 		} | ||||
| 	} | ||||
| 	buf.WriteString(f.Name()) | ||||
| 	s := buf.String() | ||||
| 	bufferPool.Put(buf) | ||||
| 	return s | ||||
| } | ||||
|  | ||||
| func writePackage(buf *bytes.Buffer, pkg *types.Package) { | ||||
| 	if pkg == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	s := pkg.Path() | ||||
| 	if s != "" { | ||||
| 		buf.WriteString(s) | ||||
| 		buf.WriteByte('.') | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										29
									
								
								vendor/honnef.co/go/tools/lint/lintdsl/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								vendor/honnef.co/go/tools/lint/lintdsl/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| load("@io_bazel_rules_go//go:def.bzl", "go_library") | ||||
|  | ||||
| go_library( | ||||
|     name = "go_default_library", | ||||
|     srcs = ["lintdsl.go"], | ||||
|     importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/lint/lintdsl", | ||||
|     importpath = "honnef.co/go/tools/lint/lintdsl", | ||||
|     visibility = ["//visibility:public"], | ||||
|     deps = [ | ||||
|         "//vendor/golang.org/x/tools/go/analysis:go_default_library", | ||||
|         "//vendor/honnef.co/go/tools/facts:go_default_library", | ||||
|         "//vendor/honnef.co/go/tools/lint:go_default_library", | ||||
|         "//vendor/honnef.co/go/tools/ssa:go_default_library", | ||||
|     ], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "package-srcs", | ||||
|     srcs = glob(["**"]), | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:private"], | ||||
| ) | ||||
|  | ||||
| filegroup( | ||||
|     name = "all-srcs", | ||||
|     srcs = [":package-srcs"], | ||||
|     tags = ["automanaged"], | ||||
|     visibility = ["//visibility:public"], | ||||
| ) | ||||
							
								
								
									
										400
									
								
								vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										400
									
								
								vendor/honnef.co/go/tools/lint/lintdsl/lintdsl.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,400 @@ | ||||
| // Package lintdsl provides helpers for implementing static analysis | ||||
| // checks. Dot-importing this package is encouraged. | ||||
| package lintdsl | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"go/ast" | ||||
| 	"go/constant" | ||||
| 	"go/printer" | ||||
| 	"go/token" | ||||
| 	"go/types" | ||||
| 	"strings" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| 	"honnef.co/go/tools/facts" | ||||
| 	"honnef.co/go/tools/lint" | ||||
| 	"honnef.co/go/tools/ssa" | ||||
| ) | ||||
|  | ||||
| type packager interface { | ||||
| 	Package() *ssa.Package | ||||
| } | ||||
|  | ||||
| func CallName(call *ssa.CallCommon) string { | ||||
| 	if call.IsInvoke() { | ||||
| 		return "" | ||||
| 	} | ||||
| 	switch v := call.Value.(type) { | ||||
| 	case *ssa.Function: | ||||
| 		fn, ok := v.Object().(*types.Func) | ||||
| 		if !ok { | ||||
| 			return "" | ||||
| 		} | ||||
| 		return lint.FuncName(fn) | ||||
| 	case *ssa.Builtin: | ||||
| 		return v.Name() | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
| func IsCallTo(call *ssa.CallCommon, name string) bool { return CallName(call) == name } | ||||
| func IsType(T types.Type, name string) bool           { return types.TypeString(T, nil) == name } | ||||
|  | ||||
| func FilterDebug(instr []ssa.Instruction) []ssa.Instruction { | ||||
| 	var out []ssa.Instruction | ||||
| 	for _, ins := range instr { | ||||
| 		if _, ok := ins.(*ssa.DebugRef); !ok { | ||||
| 			out = append(out, ins) | ||||
| 		} | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| func IsExample(fn *ssa.Function) bool { | ||||
| 	if !strings.HasPrefix(fn.Name(), "Example") { | ||||
| 		return false | ||||
| 	} | ||||
| 	f := fn.Prog.Fset.File(fn.Pos()) | ||||
| 	if f == nil { | ||||
| 		return false | ||||
| 	} | ||||
| 	return strings.HasSuffix(f.Name(), "_test.go") | ||||
| } | ||||
|  | ||||
| func IsPointerLike(T types.Type) bool { | ||||
| 	switch T := T.Underlying().(type) { | ||||
| 	case *types.Interface, *types.Chan, *types.Map, *types.Signature, *types.Pointer: | ||||
| 		return true | ||||
| 	case *types.Basic: | ||||
| 		return T.Kind() == types.UnsafePointer | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func IsIdent(expr ast.Expr, ident string) bool { | ||||
| 	id, ok := expr.(*ast.Ident) | ||||
| 	return ok && id.Name == ident | ||||
| } | ||||
|  | ||||
| // isBlank returns whether id is the blank identifier "_". | ||||
| // If id == nil, the answer is false. | ||||
| func IsBlank(id ast.Expr) bool { | ||||
| 	ident, _ := id.(*ast.Ident) | ||||
| 	return ident != nil && ident.Name == "_" | ||||
| } | ||||
|  | ||||
| func IsIntLiteral(expr ast.Expr, literal string) bool { | ||||
| 	lit, ok := expr.(*ast.BasicLit) | ||||
| 	return ok && lit.Kind == token.INT && lit.Value == literal | ||||
| } | ||||
|  | ||||
| // Deprecated: use IsIntLiteral instead | ||||
| func IsZero(expr ast.Expr) bool { | ||||
| 	return IsIntLiteral(expr, "0") | ||||
| } | ||||
|  | ||||
| func IsOfType(pass *analysis.Pass, expr ast.Expr, name string) bool { | ||||
| 	return IsType(pass.TypesInfo.TypeOf(expr), name) | ||||
| } | ||||
|  | ||||
| func IsInTest(pass *analysis.Pass, node lint.Positioner) bool { | ||||
| 	// FIXME(dh): this doesn't work for global variables with | ||||
| 	// initializers | ||||
| 	f := pass.Fset.File(node.Pos()) | ||||
| 	return f != nil && strings.HasSuffix(f.Name(), "_test.go") | ||||
| } | ||||
|  | ||||
| func IsInMain(pass *analysis.Pass, node lint.Positioner) bool { | ||||
| 	if node, ok := node.(packager); ok { | ||||
| 		return node.Package().Pkg.Name() == "main" | ||||
| 	} | ||||
| 	return pass.Pkg.Name() == "main" | ||||
| } | ||||
|  | ||||
| func SelectorName(pass *analysis.Pass, expr *ast.SelectorExpr) string { | ||||
| 	info := pass.TypesInfo | ||||
| 	sel := info.Selections[expr] | ||||
| 	if sel == nil { | ||||
| 		if x, ok := expr.X.(*ast.Ident); ok { | ||||
| 			pkg, ok := info.ObjectOf(x).(*types.PkgName) | ||||
| 			if !ok { | ||||
| 				// This shouldn't happen | ||||
| 				return fmt.Sprintf("%s.%s", x.Name, expr.Sel.Name) | ||||
| 			} | ||||
| 			return fmt.Sprintf("%s.%s", pkg.Imported().Path(), expr.Sel.Name) | ||||
| 		} | ||||
| 		panic(fmt.Sprintf("unsupported selector: %v", expr)) | ||||
| 	} | ||||
| 	return fmt.Sprintf("(%s).%s", sel.Recv(), sel.Obj().Name()) | ||||
| } | ||||
|  | ||||
| func IsNil(pass *analysis.Pass, expr ast.Expr) bool { | ||||
| 	return pass.TypesInfo.Types[expr].IsNil() | ||||
| } | ||||
|  | ||||
| func BoolConst(pass *analysis.Pass, expr ast.Expr) bool { | ||||
| 	val := pass.TypesInfo.ObjectOf(expr.(*ast.Ident)).(*types.Const).Val() | ||||
| 	return constant.BoolVal(val) | ||||
| } | ||||
|  | ||||
| func IsBoolConst(pass *analysis.Pass, expr ast.Expr) bool { | ||||
| 	// We explicitly don't support typed bools because more often than | ||||
| 	// not, custom bool types are used as binary enums and the | ||||
| 	// explicit comparison is desired. | ||||
|  | ||||
| 	ident, ok := expr.(*ast.Ident) | ||||
| 	if !ok { | ||||
| 		return false | ||||
| 	} | ||||
| 	obj := pass.TypesInfo.ObjectOf(ident) | ||||
| 	c, ok := obj.(*types.Const) | ||||
| 	if !ok { | ||||
| 		return false | ||||
| 	} | ||||
| 	basic, ok := c.Type().(*types.Basic) | ||||
| 	if !ok { | ||||
| 		return false | ||||
| 	} | ||||
| 	if basic.Kind() != types.UntypedBool && basic.Kind() != types.Bool { | ||||
| 		return false | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func ExprToInt(pass *analysis.Pass, expr ast.Expr) (int64, bool) { | ||||
| 	tv := pass.TypesInfo.Types[expr] | ||||
| 	if tv.Value == nil { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 	if tv.Value.Kind() != constant.Int { | ||||
| 		return 0, false | ||||
| 	} | ||||
| 	return constant.Int64Val(tv.Value) | ||||
| } | ||||
|  | ||||
| func ExprToString(pass *analysis.Pass, expr ast.Expr) (string, bool) { | ||||
| 	val := pass.TypesInfo.Types[expr].Value | ||||
| 	if val == nil { | ||||
| 		return "", false | ||||
| 	} | ||||
| 	if val.Kind() != constant.String { | ||||
| 		return "", false | ||||
| 	} | ||||
| 	return constant.StringVal(val), true | ||||
| } | ||||
|  | ||||
| // Dereference returns a pointer's element type; otherwise it returns | ||||
| // T. | ||||
| func Dereference(T types.Type) types.Type { | ||||
| 	if p, ok := T.Underlying().(*types.Pointer); ok { | ||||
| 		return p.Elem() | ||||
| 	} | ||||
| 	return T | ||||
| } | ||||
|  | ||||
| // DereferenceR returns a pointer's element type; otherwise it returns | ||||
| // T. If the element type is itself a pointer, DereferenceR will be | ||||
| // applied recursively. | ||||
| func DereferenceR(T types.Type) types.Type { | ||||
| 	if p, ok := T.Underlying().(*types.Pointer); ok { | ||||
| 		return DereferenceR(p.Elem()) | ||||
| 	} | ||||
| 	return T | ||||
| } | ||||
|  | ||||
| func IsGoVersion(pass *analysis.Pass, minor int) bool { | ||||
| 	version := pass.Analyzer.Flags.Lookup("go").Value.(flag.Getter).Get().(int) | ||||
| 	return version >= minor | ||||
| } | ||||
|  | ||||
| func CallNameAST(pass *analysis.Pass, call *ast.CallExpr) string { | ||||
| 	switch fun := call.Fun.(type) { | ||||
| 	case *ast.SelectorExpr: | ||||
| 		fn, ok := pass.TypesInfo.ObjectOf(fun.Sel).(*types.Func) | ||||
| 		if !ok { | ||||
| 			return "" | ||||
| 		} | ||||
| 		return lint.FuncName(fn) | ||||
| 	case *ast.Ident: | ||||
| 		obj := pass.TypesInfo.ObjectOf(fun) | ||||
| 		switch obj := obj.(type) { | ||||
| 		case *types.Func: | ||||
| 			return lint.FuncName(obj) | ||||
| 		case *types.Builtin: | ||||
| 			return obj.Name() | ||||
| 		default: | ||||
| 			return "" | ||||
| 		} | ||||
| 	default: | ||||
| 		return "" | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func IsCallToAST(pass *analysis.Pass, node ast.Node, name string) bool { | ||||
| 	call, ok := node.(*ast.CallExpr) | ||||
| 	if !ok { | ||||
| 		return false | ||||
| 	} | ||||
| 	return CallNameAST(pass, call) == name | ||||
| } | ||||
|  | ||||
| func IsCallToAnyAST(pass *analysis.Pass, node ast.Node, names ...string) bool { | ||||
| 	for _, name := range names { | ||||
| 		if IsCallToAST(pass, node, name) { | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func Render(pass *analysis.Pass, x interface{}) string { | ||||
| 	var buf bytes.Buffer | ||||
| 	if err := printer.Fprint(&buf, pass.Fset, x); err != nil { | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return buf.String() | ||||
| } | ||||
|  | ||||
| func RenderArgs(pass *analysis.Pass, args []ast.Expr) string { | ||||
| 	var ss []string | ||||
| 	for _, arg := range args { | ||||
| 		ss = append(ss, Render(pass, arg)) | ||||
| 	} | ||||
| 	return strings.Join(ss, ", ") | ||||
| } | ||||
|  | ||||
| func Preamble(f *ast.File) string { | ||||
| 	cutoff := f.Package | ||||
| 	if f.Doc != nil { | ||||
| 		cutoff = f.Doc.Pos() | ||||
| 	} | ||||
| 	var out []string | ||||
| 	for _, cmt := range f.Comments { | ||||
| 		if cmt.Pos() >= cutoff { | ||||
| 			break | ||||
| 		} | ||||
| 		out = append(out, cmt.Text()) | ||||
| 	} | ||||
| 	return strings.Join(out, "\n") | ||||
| } | ||||
|  | ||||
| func Inspect(node ast.Node, fn func(node ast.Node) bool) { | ||||
| 	if node == nil { | ||||
| 		return | ||||
| 	} | ||||
| 	ast.Inspect(node, fn) | ||||
| } | ||||
|  | ||||
// GroupSpecs partitions specs into runs of specs that appear on
// adjacent source lines, i.e. with no blank line (or other line gap)
// between consecutive specs. Positions are resolved without applying
// //line directives. An empty input yields nil.
func GroupSpecs(fset *token.FileSet, specs []ast.Spec) [][]ast.Spec {
	if len(specs) == 0 {
		return nil
	}
	groups := make([][]ast.Spec, 1)
	groups[0] = append(groups[0], specs[0])

	for _, spec := range specs[1:] {
		g := groups[len(groups)-1]
		// Start a new group unless spec begins on the line directly
		// after the last line of the previous spec in the group.
		if fset.PositionFor(spec.Pos(), false).Line-1 !=
			fset.PositionFor(g[len(g)-1].End(), false).Line {

			groups = append(groups, nil)
		}

		groups[len(groups)-1] = append(groups[len(groups)-1], spec)
	}

	return groups
}
|  | ||||
| func IsObject(obj types.Object, name string) bool { | ||||
| 	var path string | ||||
| 	if pkg := obj.Pkg(); pkg != nil { | ||||
| 		path = pkg.Path() + "." | ||||
| 	} | ||||
| 	return path+obj.Name() == name | ||||
| } | ||||
|  | ||||
// Field describes a single (possibly promoted) struct field as
// discovered by FlattenFields.
type Field struct {
	Var  *types.Var // the field itself
	Tag  string     // the field's struct tag, if any
	Path []int      // field indices from the root struct down to this field
}
|  | ||||
// FlattenFields recursively flattens T and embedded structs,
// returning a list of fields. If multiple fields with the same name
// exist, all will be returned. It is a convenience wrapper around
// flattenFields with an empty path and no cycle-tracking state.
func FlattenFields(T *types.Struct) []Field {
	return flattenFields(T, nil, nil)
}
|  | ||||
// flattenFields is the recursive worker for FlattenFields. path holds
// the field indices leading to T; seen tracks struct types already
// visited so that cyclic embedding terminates.
func flattenFields(T *types.Struct, path []int, seen map[types.Type]bool) []Field {
	if seen == nil {
		seen = map[types.Type]bool{}
	}
	if seen[T] {
		return nil
	}
	seen[T] = true
	var out []Field
	for i := 0; i < T.NumFields(); i++ {
		field := T.Field(i)
		tag := T.Tag(i)
		// The three-index slice caps capacity at len(path) so append
		// must copy, preventing sibling fields from sharing (and
		// clobbering) the caller's backing array.
		np := append(path[:len(path):len(path)], i)
		if field.Anonymous() {
			// Embedded struct (possibly behind a pointer): recurse and
			// promote its fields.
			if s, ok := Dereference(field.Type()).Underlying().(*types.Struct); ok {
				out = append(out, flattenFields(s, np, seen)...)
			}
		} else {
			out = append(out, Field{field, tag, np})
		}
	}
	return out
}
|  | ||||
// File returns the *ast.File containing node, looked up via the
// TokenFile analyzer result that maps token files to AST files.
func File(pass *analysis.Pass, node lint.Positioner) *ast.File {
	// NOTE(review): the result of this call is discarded and
	// PositionFor has no visible side effect here — confirm whether
	// this line can be removed.
	pass.Fset.PositionFor(node.Pos(), true)
	m := pass.ResultOf[facts.TokenFile].(map[*token.File]*ast.File)
	return m[pass.Fset.File(node.Pos())]
}
|  | ||||
// IsGenerated reports whether pos is in a generated file. It ignores
// //line directives.
func IsGenerated(pass *analysis.Pass, pos token.Pos) bool {
	_, ok := Generator(pass, pos)
	return ok
}
|  | ||||
| // Generator returns the generator that generated the file containing | ||||
| // pos. It ignores //line directives. | ||||
| func Generator(pass *analysis.Pass, pos token.Pos) (facts.Generator, bool) { | ||||
| 	file := pass.Fset.PositionFor(pos, false).Filename | ||||
| 	m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) | ||||
| 	g, ok := m[file] | ||||
| 	return g, ok | ||||
| } | ||||
|  | ||||
| func ReportfFG(pass *analysis.Pass, pos token.Pos, f string, args ...interface{}) { | ||||
| 	file := lint.DisplayPosition(pass.Fset, pos).Filename | ||||
| 	m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) | ||||
| 	if _, ok := m[file]; ok { | ||||
| 		return | ||||
| 	} | ||||
| 	pass.Reportf(pos, f, args...) | ||||
| } | ||||
|  | ||||
| func ReportNodef(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) { | ||||
| 	msg := fmt.Sprintf(format, args...) | ||||
| 	pass.Report(analysis.Diagnostic{Pos: node.Pos(), End: node.End(), Message: msg}) | ||||
| } | ||||
|  | ||||
| func ReportNodefFG(pass *analysis.Pass, node ast.Node, format string, args ...interface{}) { | ||||
| 	file := lint.DisplayPosition(pass.Fset, node.Pos()).Filename | ||||
| 	m := pass.ResultOf[facts.Generated].(map[string]facts.Generator) | ||||
| 	if _, ok := m[file]; ok { | ||||
| 		return | ||||
| 	} | ||||
| 	ReportNodef(pass, node, format, args...) | ||||
| } | ||||
							
								
								
									
										41
									
								
								vendor/honnef.co/go/tools/lint/lintutil/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										41
									
								
								vendor/honnef.co/go/tools/lint/lintutil/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,41 @@ | ||||
# NOTE(review): the `tags = ["automanaged"]` markers below suggest this
# file is maintained by Kubernetes' BUILD generation tooling — confirm
# before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Vendored honnef.co/go/tools/lint/lintutil library.
go_library(
    name = "go_default_library",
    srcs = [
        "stats.go",
        "stats_bsd.go",
        "stats_posix.go",
        "util.go",
    ],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/lint/lintutil",
    importpath = "honnef.co/go/tools/lint/lintutil",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/golang.org/x/tools/go/analysis:go_default_library",
        "//vendor/golang.org/x/tools/go/buildutil:go_default_library",
        "//vendor/golang.org/x/tools/go/packages:go_default_library",
        "//vendor/honnef.co/go/tools/config:go_default_library",
        "//vendor/honnef.co/go/tools/internal/cache:go_default_library",
        "//vendor/honnef.co/go/tools/lint:go_default_library",
        "//vendor/honnef.co/go/tools/lint/lintutil/format:go_default_library",
        "//vendor/honnef.co/go/tools/version:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [
        ":package-srcs",
        "//vendor/honnef.co/go/tools/lint/lintutil/format:all-srcs",
    ],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										24
									
								
								vendor/honnef.co/go/tools/lint/lintutil/format/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								vendor/honnef.co/go/tools/lint/lintutil/format/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
# NOTE(review): the `tags = ["automanaged"]` markers below suggest this
# file is maintained by Kubernetes' BUILD generation tooling — confirm
# before editing by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

# Vendored honnef.co/go/tools/lint/lintutil/format library.
go_library(
    name = "go_default_library",
    srcs = ["format.go"],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/lint/lintutil/format",
    importpath = "honnef.co/go/tools/lint/lintutil/format",
    visibility = ["//visibility:public"],
    deps = ["//vendor/honnef.co/go/tools/lint:go_default_library"],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
							
								
								
									
										135
									
								
								vendor/honnef.co/go/tools/lint/lintutil/format/format.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										135
									
								
								vendor/honnef.co/go/tools/lint/lintutil/format/format.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,135 @@ | ||||
| // Package format provides formatters for linter problems. | ||||
| package format | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
| 	"go/token" | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"path/filepath" | ||||
| 	"text/tabwriter" | ||||
|  | ||||
| 	"honnef.co/go/tools/lint" | ||||
| ) | ||||
|  | ||||
| func shortPath(path string) string { | ||||
| 	cwd, err := os.Getwd() | ||||
| 	if err != nil { | ||||
| 		return path | ||||
| 	} | ||||
| 	if rel, err := filepath.Rel(cwd, path); err == nil && len(rel) < len(path) { | ||||
| 		return rel | ||||
| 	} | ||||
| 	return path | ||||
| } | ||||
|  | ||||
| func relativePositionString(pos token.Position) string { | ||||
| 	s := shortPath(pos.Filename) | ||||
| 	if pos.IsValid() { | ||||
| 		if s != "" { | ||||
| 			s += ":" | ||||
| 		} | ||||
| 		s += fmt.Sprintf("%d:%d", pos.Line, pos.Column) | ||||
| 	} | ||||
| 	if s == "" { | ||||
| 		s = "-" | ||||
| 	} | ||||
| 	return s | ||||
| } | ||||
|  | ||||
// Statter is implemented by formatters that can print a summary after
// all problems have been formatted.
type Statter interface {
	Stats(total, errors, warnings int)
}
|  | ||||
// Formatter outputs a lint problem in some textual representation.
type Formatter interface {
	Format(p lint.Problem)
}
|  | ||||
// Text is a Formatter writing plain, human-readable lines to W.
type Text struct {
	W io.Writer
}
|  | ||||
// Format writes p as a single "file:line:col: message" line, with the
// filename shortened relative to the working directory.
func (o Text) Format(p lint.Problem) {
	fmt.Fprintf(o.W, "%v: %s\n", relativePositionString(p.Pos), p.String())
}
|  | ||||
// JSON is a Formatter writing one JSON object per problem to W.
type JSON struct {
	W io.Writer
}
|  | ||||
| func severity(s lint.Severity) string { | ||||
| 	switch s { | ||||
| 	case lint.Error: | ||||
| 		return "error" | ||||
| 	case lint.Warning: | ||||
| 		return "warning" | ||||
| 	case lint.Ignored: | ||||
| 		return "ignored" | ||||
| 	} | ||||
| 	return "" | ||||
| } | ||||
|  | ||||
// Format encodes p as a single JSON object (one per line, as produced
// by json.Encoder). The encoding error is deliberately discarded:
// there is no sensible place to report a failure to print a problem.
func (o JSON) Format(p lint.Problem) {
	// location mirrors token.Position with JSON-friendly field names.
	type location struct {
		File   string `json:"file"`
		Line   int    `json:"line"`
		Column int    `json:"column"`
	}
	jp := struct {
		Code     string   `json:"code"`
		Severity string   `json:"severity,omitempty"`
		Location location `json:"location"`
		End      location `json:"end"`
		Message  string   `json:"message"`
	}{
		Code:     p.Check,
		Severity: severity(p.Severity),
		Location: location{
			File:   p.Pos.Filename,
			Line:   p.Pos.Line,
			Column: p.Pos.Column,
		},
		End: location{
			File:   p.End.Filename,
			Line:   p.End.Line,
			Column: p.End.Column,
		},
		Message: p.Message,
	}
	_ = json.NewEncoder(o.W).Encode(jp)
}
|  | ||||
// Stylish is a Formatter that groups problems by file and aligns each
// file's problems in columns via a tabwriter.
type Stylish struct {
	W io.Writer

	prevFile string // filename of the previously formatted problem
	tw       *tabwriter.Writer // column writer for the current file's block
}
|  | ||||
// Format prints p under a per-file header. When the file changes, the
// previous file's tabwriter is flushed and a fresh one is started so
// each file's problems are column-aligned independently.
func (o *Stylish) Format(p lint.Problem) {
	pos := p.Pos
	if pos.Filename == "" {
		pos.Filename = "-"
	}

	if pos.Filename != o.prevFile {
		if o.prevFile != "" {
			// Finish the previous file's aligned block.
			o.tw.Flush()
			fmt.Fprintln(o.W)
		}
		fmt.Fprintln(o.W, pos.Filename)
		o.prevFile = pos.Filename
		o.tw = tabwriter.NewWriter(o.W, 0, 4, 2, ' ', 0)
	}
	fmt.Fprintf(o.tw, "  (%d, %d)\t%s\t%s\n", pos.Line, pos.Column, p.Check, p.Message)
}
|  | ||||
// Stats flushes any pending per-file output and prints the summary
// line. o.tw is nil when no problem was ever formatted.
func (o *Stylish) Stats(total, errors, warnings int) {
	if o.tw != nil {
		o.tw.Flush()
		fmt.Fprintln(o.W)
	}
	fmt.Fprintf(o.W, " ✖ %d problems (%d errors, %d warnings)\n",
		total, errors, warnings)
}
							
								
								
									
										7
									
								
								vendor/honnef.co/go/tools/lint/lintutil/stats.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										7
									
								
								vendor/honnef.co/go/tools/lint/lintutil/stats.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,7 @@ | ||||
// +build !aix,!android,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris

package lintutil

import "os"

// infoSignals is empty on platforms for which no progress-dump signal
// is wired up; see stats_bsd.go and stats_posix.go for the platforms
// that have one.
var infoSignals = []os.Signal{}
							
								
								
									
										10
									
								
								vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								vendor/honnef.co/go/tools/lint/lintutil/stats_bsd.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | ||||
// +build darwin dragonfly freebsd netbsd openbsd

package lintutil

import (
	"os"
	"syscall"
)

// infoSignals lists the signals that trigger a progress dump; the
// BSD-derived platforms provide SIGINFO for this purpose.
var infoSignals = []os.Signal{syscall.SIGINFO}
							
								
								
									
										10
									
								
								vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										10
									
								
								vendor/honnef.co/go/tools/lint/lintutil/stats_posix.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,10 @@ | ||||
// +build aix android linux solaris

package lintutil

import (
	"os"
	"syscall"
)

// infoSignals lists the signals that trigger a progress dump; these
// platforms lack SIGINFO, so SIGUSR1 is used instead.
var infoSignals = []os.Signal{syscall.SIGUSR1}
							
								
								
									
										392
									
								
								vendor/honnef.co/go/tools/lint/lintutil/util.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										392
									
								
								vendor/honnef.co/go/tools/lint/lintutil/util.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,392 @@ | ||||
| // Copyright (c) 2013 The Go Authors. All rights reserved. | ||||
| // | ||||
| // Use of this source code is governed by a BSD-style | ||||
| // license that can be found in the LICENSE file or at | ||||
| // https://developers.google.com/open-source/licenses/bsd. | ||||
|  | ||||
| // Package lintutil provides helpers for writing linter command lines. | ||||
| package lintutil // import "honnef.co/go/tools/lint/lintutil" | ||||
|  | ||||
| import ( | ||||
| 	"crypto/sha256" | ||||
| 	"errors" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| 	"go/build" | ||||
| 	"go/token" | ||||
| 	"io" | ||||
| 	"log" | ||||
| 	"os" | ||||
| 	"os/signal" | ||||
| 	"regexp" | ||||
| 	"runtime" | ||||
| 	"runtime/pprof" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"sync/atomic" | ||||
|  | ||||
| 	"honnef.co/go/tools/config" | ||||
| 	"honnef.co/go/tools/internal/cache" | ||||
| 	"honnef.co/go/tools/lint" | ||||
| 	"honnef.co/go/tools/lint/lintutil/format" | ||||
| 	"honnef.co/go/tools/version" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| 	"golang.org/x/tools/go/buildutil" | ||||
| 	"golang.org/x/tools/go/packages" | ||||
| ) | ||||
|  | ||||
| func NewVersionFlag() flag.Getter { | ||||
| 	tags := build.Default.ReleaseTags | ||||
| 	v := tags[len(tags)-1][2:] | ||||
| 	version := new(VersionFlag) | ||||
| 	if err := version.Set(v); err != nil { | ||||
| 		panic(fmt.Sprintf("internal error: %s", err)) | ||||
| 	} | ||||
| 	return version | ||||
| } | ||||
|  | ||||
| type VersionFlag int | ||||
|  | ||||
| func (v *VersionFlag) String() string { | ||||
| 	return fmt.Sprintf("1.%d", *v) | ||||
|  | ||||
| } | ||||
|  | ||||
| func (v *VersionFlag) Set(s string) error { | ||||
| 	if len(s) < 3 { | ||||
| 		return errors.New("invalid Go version") | ||||
| 	} | ||||
| 	if s[0] != '1' { | ||||
| 		return errors.New("invalid Go version") | ||||
| 	} | ||||
| 	if s[1] != '.' { | ||||
| 		return errors.New("invalid Go version") | ||||
| 	} | ||||
| 	i, err := strconv.Atoi(s[2:]) | ||||
| 	*v = VersionFlag(i) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// Get implements flag.Getter, returning the minor version as an int.
func (v *VersionFlag) Get() interface{} {
	return int(*v)
}
|  | ||||
// usage returns a function suitable for flag.FlagSet.Usage that
// prints the accepted invocation forms of name followed by the flag
// defaults.
func usage(name string, flags *flag.FlagSet) func() {
	return func() {
		w := os.Stderr
		fmt.Fprintf(w, "Usage of %s:\n", name)
		for _, form := range []string{
			"\t%s [flags] # runs on package in current directory\n",
			"\t%s [flags] packages\n",
			"\t%s [flags] directory\n",
			"\t%s [flags] files... # must be a single package\n",
		} {
			fmt.Fprintf(w, form, name)
		}
		fmt.Fprintf(w, "Flags:\n")
		flags.PrintDefaults()
	}
}
|  | ||||
| type list []string | ||||
|  | ||||
// String implements flag.Value, rendering the list as a quoted,
// comma-joined string.
func (list *list) String() string {
	return `"` + strings.Join(*list, ",") + `"`
}
|  | ||||
// Set implements flag.Value, splitting s on commas. An empty string
// resets the list to nil rather than producing a single empty entry.
func (list *list) Set(s string) error {
	if s == "" {
		*list = nil
		return nil
	}

	*list = strings.Split(s, ",")
	return nil
}
|  | ||||
// FlagSet returns a flag.FlagSet pre-populated with every flag the
// linter front end understands, with -go defaulting to the release
// this binary was built with.
func FlagSet(name string) *flag.FlagSet {
	flags := flag.NewFlagSet("", flag.ExitOnError)
	flags.Usage = usage(name, flags)
	flags.String("tags", "", "List of `build tags`")
	flags.Bool("tests", true, "Include tests")
	flags.Bool("version", false, "Print version and exit")
	flags.Bool("show-ignored", false, "Don't filter ignored problems")
	flags.String("f", "text", "Output `format` (valid choices are 'stylish', 'text' and 'json')")
	flags.String("explain", "", "Print description of `check`")

	flags.String("debug.cpuprofile", "", "Write CPU profile to `file`")
	flags.String("debug.memprofile", "", "Write memory profile to `file`")
	flags.Bool("debug.version", false, "Print detailed version information about this program")
	flags.Bool("debug.no-compile-errors", false, "Don't print compile errors")

	checks := list{"inherit"}
	fail := list{"all"}
	flags.Var(&checks, "checks", "Comma-separated list of `checks` to enable.")
	flags.Var(&fail, "fail", "Comma-separated list of `checks` that can cause a non-zero exit status.")

	// Derive the default -go value from the newest release tag,
	// e.g. "go1.13" -> "1.13".
	tags := build.Default.ReleaseTags
	v := tags[len(tags)-1][2:]
	version := new(VersionFlag)
	if err := version.Set(v); err != nil {
		panic(fmt.Sprintf("internal error: %s", err))
	}

	flags.Var(version, "go", "Target Go `version` in the format '1.x'")
	return flags
}
|  | ||||
| func findCheck(cs []*analysis.Analyzer, check string) (*analysis.Analyzer, bool) { | ||||
| 	for _, c := range cs { | ||||
| 		if c.Name == check { | ||||
| 			return c, true | ||||
| 		} | ||||
| 	} | ||||
| 	return nil, false | ||||
| } | ||||
|  | ||||
// ProcessFlagSet runs the linter as configured by the parsed flag set
// fs, prints the problems with the selected formatter, and terminates
// the process: exit code 1 when any "fail"-listed (or compile) problem
// was found, 2 on an unknown formatter, 0 otherwise.
func ProcessFlagSet(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, fs *flag.FlagSet) {
	tags := fs.Lookup("tags").Value.(flag.Getter).Get().(string)
	tests := fs.Lookup("tests").Value.(flag.Getter).Get().(bool)
	goVersion := fs.Lookup("go").Value.(flag.Getter).Get().(int)
	formatter := fs.Lookup("f").Value.(flag.Getter).Get().(string)
	printVersion := fs.Lookup("version").Value.(flag.Getter).Get().(bool)
	showIgnored := fs.Lookup("show-ignored").Value.(flag.Getter).Get().(bool)
	explain := fs.Lookup("explain").Value.(flag.Getter).Get().(string)

	cpuProfile := fs.Lookup("debug.cpuprofile").Value.(flag.Getter).Get().(string)
	memProfile := fs.Lookup("debug.memprofile").Value.(flag.Getter).Get().(string)
	debugVersion := fs.Lookup("debug.version").Value.(flag.Getter).Get().(bool)
	debugNoCompile := fs.Lookup("debug.no-compile-errors").Value.(flag.Getter).Get().(bool)

	cfg := config.Config{}
	cfg.Checks = *fs.Lookup("checks").Value.(*list)

	// exit finalizes any requested profiles before terminating.
	// NOTE(review): the memory-profile file is never closed and the
	// WriteHeapProfile error is discarded — consider handling both.
	exit := func(code int) {
		if cpuProfile != "" {
			pprof.StopCPUProfile()
		}
		if memProfile != "" {
			f, err := os.Create(memProfile)
			if err != nil {
				panic(err)
			}
			runtime.GC()
			pprof.WriteHeapProfile(f)
		}
		os.Exit(code)
	}
	if cpuProfile != "" {
		f, err := os.Create(cpuProfile)
		if err != nil {
			log.Fatal(err)
		}
		// NOTE(review): StartCPUProfile's error is ignored here.
		pprof.StartCPUProfile(f)
	}

	if debugVersion {
		version.Verbose()
		exit(0)
	}

	if printVersion {
		version.Print()
		exit(0)
	}

	// Validate that the tags argument is well-formed. go/packages
	// doesn't detect malformed build flags and returns unhelpful
	// errors.
	tf := buildutil.TagsFlag{}
	if err := tf.Set(tags); err != nil {
		fmt.Fprintln(os.Stderr, fmt.Errorf("invalid value %q for flag -tags: %s", tags, err))
		exit(1)
	}

	// -explain: print a single check's documentation and stop.
	if explain != "" {
		var haystack []*analysis.Analyzer
		haystack = append(haystack, cs...)
		for _, cum := range cums {
			haystack = append(haystack, cum.Analyzer())
		}
		check, ok := findCheck(haystack, explain)
		if !ok {
			fmt.Fprintln(os.Stderr, "Couldn't find check", explain)
			exit(1)
		}
		if check.Doc == "" {
			fmt.Fprintln(os.Stderr, explain, "has no documentation")
			exit(1)
		}
		fmt.Println(check.Doc)
		exit(0)
	}

	ps, err := Lint(cs, cums, fs.Args(), &Options{
		Tags:      tags,
		LintTests: tests,
		GoVersion: goVersion,
		Config:    cfg,
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		exit(1)
	}

	var f format.Formatter
	switch formatter {
	case "text":
		f = format.Text{W: os.Stdout}
	case "stylish":
		f = &format.Stylish{W: os.Stdout}
	case "json":
		f = format.JSON{W: os.Stdout}
	default:
		fmt.Fprintf(os.Stderr, "unsupported output format %q\n", formatter)
		exit(2)
	}

	var (
		total    int
		errors   int
		warnings int
	)

	// Problems whose check is listed in -fail count as errors and
	// force a non-zero exit; everything else is downgraded to a
	// warning. Compile errors always fail.
	fail := *fs.Lookup("fail").Value.(*list)
	analyzers := make([]*analysis.Analyzer, len(cs), len(cs)+len(cums))
	copy(analyzers, cs)
	for _, cum := range cums {
		analyzers = append(analyzers, cum.Analyzer())
	}
	shouldExit := lint.FilterChecks(analyzers, fail)
	shouldExit["compile"] = true

	total = len(ps)
	for _, p := range ps {
		if p.Check == "compile" && debugNoCompile {
			continue
		}
		if p.Severity == lint.Ignored && !showIgnored {
			continue
		}
		if shouldExit[p.Check] {
			errors++
		} else {
			p.Severity = lint.Warning
			warnings++
		}
		f.Format(p)
	}
	if f, ok := f.(format.Statter); ok {
		f.Stats(total, errors, warnings)
	}
	if errors > 0 {
		exit(1)
	}
	exit(0)
}
|  | ||||
// Options configures a call to Lint.
type Options struct {
	Config config.Config // per-run check configuration

	Tags      string // build tags passed to the build system via -tags
	LintTests bool   // also lint test files
	GoVersion int    // targeted Go minor version (1.x)
}
|  | ||||
| func computeSalt() ([]byte, error) { | ||||
| 	if version.Version != "devel" { | ||||
| 		return []byte(version.Version), nil | ||||
| 	} | ||||
| 	p, err := os.Executable() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	f, err := os.Open(p) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	defer f.Close() | ||||
| 	h := sha256.New() | ||||
| 	if _, err := io.Copy(h, f); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return h.Sum(nil), nil | ||||
| } | ||||
|  | ||||
// Lint runs the given checkers over the packages matched by paths and
// returns the problems found. While it runs, receipt of any signal in
// infoSignals (e.g. SIGUSR1/SIGINFO, platform dependent) prints rough
// progress statistics to stderr.
func Lint(cs []*analysis.Analyzer, cums []lint.CumulativeChecker, paths []string, opt *Options) ([]lint.Problem, error) {
	// Salt the fact/result cache so entries from a different build of
	// the linter are not reused.
	salt, err := computeSalt()
	if err != nil {
		return nil, fmt.Errorf("could not compute salt for cache: %s", err)
	}
	cache.SetSalt(salt)

	if opt == nil {
		opt = &Options{}
	}

	l := &lint.Linter{
		Checkers:           cs,
		CumulativeCheckers: cums,
		GoVersion:          opt.GoVersion,
		Config:             opt.Config,
	}
	cfg := &packages.Config{}
	if opt.LintTests {
		cfg.Tests = true
	}
	if opt.Tags != "" {
		cfg.BuildFlags = append(cfg.BuildFlags, "-tags", opt.Tags)
	}

	printStats := func() {
		// Individual stats are read atomically, but overall there
		// is no synchronisation. For printing rough progress
		// information, this doesn't matter.
		switch atomic.LoadUint32(&l.Stats.State) {
		case lint.StateInitializing:
			fmt.Fprintln(os.Stderr, "Status: initializing")
		case lint.StateGraph:
			fmt.Fprintln(os.Stderr, "Status: loading package graph")
		case lint.StateProcessing:
			fmt.Fprintf(os.Stderr, "Packages: %d/%d initial, %d/%d total; Workers: %d/%d; Problems: %d\n",
				atomic.LoadUint32(&l.Stats.ProcessedInitialPackages),
				atomic.LoadUint32(&l.Stats.InitialPackages),
				atomic.LoadUint32(&l.Stats.ProcessedPackages),
				atomic.LoadUint32(&l.Stats.TotalPackages),
				atomic.LoadUint32(&l.Stats.ActiveWorkers),
				atomic.LoadUint32(&l.Stats.TotalWorkers),
				atomic.LoadUint32(&l.Stats.Problems),
			)
		case lint.StateCumulative:
			fmt.Fprintln(os.Stderr, "Status: processing cumulative checkers")
		}
	}
	if len(infoSignals) > 0 {
		ch := make(chan os.Signal, 1)
		signal.Notify(ch, infoSignals...)
		// Stop delivery on return; the goroutine below then blocks on
		// the (unclosed) channel for the remainder of the process.
		defer signal.Stop(ch)
		go func() {
			for range ch {
				printStats()
			}
		}()
	}

	return l.Lint(cfg, paths)
}
|  | ||||
| var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?$`) | ||||
|  | ||||
| func parsePos(pos string) token.Position { | ||||
| 	if pos == "-" || pos == "" { | ||||
| 		return token.Position{} | ||||
| 	} | ||||
| 	parts := posRe.FindStringSubmatch(pos) | ||||
| 	if parts == nil { | ||||
| 		panic(fmt.Sprintf("internal error: malformed position %q", pos)) | ||||
| 	} | ||||
| 	file := parts[1] | ||||
| 	line, _ := strconv.Atoi(parts[2]) | ||||
| 	col, _ := strconv.Atoi(parts[3]) | ||||
| 	return token.Position{ | ||||
| 		Filename: file, | ||||
| 		Line:     line, | ||||
| 		Column:   col, | ||||
| 	} | ||||
| } | ||||
							
								
								
									
										970
									
								
								vendor/honnef.co/go/tools/lint/runner.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										970
									
								
								vendor/honnef.co/go/tools/lint/runner.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,970 @@ | ||||
| package lint | ||||
|  | ||||
| /* | ||||
| Parallelism | ||||
|  | ||||
| Runner implements parallel processing of packages by spawning one | ||||
| goroutine per package in the dependency graph, without any semaphores. | ||||
| Each goroutine initially waits on the completion of all of its | ||||
| dependencies, thus establishing correct order of processing. Once all | ||||
| dependencies finish processing, the goroutine will load the package | ||||
| from export data or source – this loading is guarded by a semaphore, | ||||
| sized according to the number of CPU cores. This way, we only have as | ||||
| many packages occupying memory and CPU resources as there are actual | ||||
| cores to process them. | ||||
|  | ||||
| This combination of unbounded goroutines but bounded package loading | ||||
| means that if we have many parallel, independent subgraphs, they will | ||||
| all execute in parallel, while not wasting resources for long linear | ||||
| chains or trying to process more subgraphs in parallel than the system | ||||
| can handle. | ||||
|  | ||||
| */ | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"encoding/gob" | ||||
| 	"encoding/hex" | ||||
| 	"fmt" | ||||
| 	"go/ast" | ||||
| 	"go/token" | ||||
| 	"go/types" | ||||
| 	"reflect" | ||||
| 	"regexp" | ||||
| 	"runtime" | ||||
| 	"sort" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
|  | ||||
| 	"golang.org/x/tools/go/analysis" | ||||
| 	"golang.org/x/tools/go/packages" | ||||
| 	"golang.org/x/tools/go/types/objectpath" | ||||
| 	"honnef.co/go/tools/config" | ||||
| 	"honnef.co/go/tools/facts" | ||||
| 	"honnef.co/go/tools/internal/cache" | ||||
| 	"honnef.co/go/tools/loader" | ||||
| ) | ||||
|  | ||||
// If enabled, abuse of the go/analysis API will lead to panics
// instead of silent misbehavior (see the fact import/export helpers).
const sanityCheck = true
|  | ||||
| // OPT(dh): for a dependency tree A->B->C->D, if we have cached data | ||||
| // for B, there should be no need to load C and D individually. Go's | ||||
| // export data for B contains all the data we need on types, and our | ||||
| // fact cache could store the union of B, C and D in B. | ||||
| // | ||||
| // This may change unused's behavior, however, as it may observe fewer | ||||
| // interfaces from transitive dependencies. | ||||
|  | ||||
// Package wraps a go/packages Package with the bookkeeping the runner
// needs: a reference count used to reclaim memory, per-analyzer result
// and fact storage, and the problems and errors collected while
// processing the package.
type Package struct {
	// Number of packages that still need this one, including the
	// package itself; manipulated atomically via decUse.
	dependents uint64

	*packages.Package
	Imports    []*Package
	initial    bool          // part of the user-requested set; loaded from source
	fromSource bool          // loaded from source rather than export data
	hash       string        // content hash, computed by packageHash
	done       chan struct{} // closed once processPkg has finished

	resultsMu sync.Mutex
	// results maps analyzer IDs to analyzer results
	results []*result

	cfg      *config.Config
	gen      map[string]facts.Generator
	problems []Problem
	ignores  []Ignore
	errs     []error

	// these slices are indexed by analysis
	facts    []map[types.Object][]analysis.Fact
	pkgFacts [][]analysis.Fact

	// Whether Types may be nilled once no dependents remain
	// (false when cumulative checkers still need type information).
	canClearTypes bool
}
|  | ||||
| func (pkg *Package) decUse() { | ||||
| 	atomic.AddUint64(&pkg.dependents, ^uint64(0)) | ||||
| 	if atomic.LoadUint64(&pkg.dependents) == 0 { | ||||
| 		// nobody depends on this package anymore | ||||
| 		if pkg.canClearTypes { | ||||
| 			pkg.Types = nil | ||||
| 		} | ||||
| 		pkg.facts = nil | ||||
| 		pkg.pkgFacts = nil | ||||
|  | ||||
| 		for _, imp := range pkg.Imports { | ||||
| 			imp.decUse() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// result memoizes one analyzer's outcome on one package. ready is
// closed once v and err have been populated; concurrent callers of
// runAnalysis block on it.
type result struct {
	v     interface{}
	err   error
	ready chan struct{}
}
|  | ||||
// Runner executes analyzers over a dependency graph of packages,
// persisting facts to an on-disk cache between runs.
type Runner struct {
	ld    loader.Loader
	cache *cache.Cache

	// dense integer IDs for all analyzers, assigned in Run
	analyzerIDs analyzerIDs

	// limits parallelism of loading packages
	loadSem chan struct{}

	goVersion int
	stats     *Stats
}
|  | ||||
// analyzerIDs assigns each analyzer a dense integer ID, used to index
// the per-package result and fact slices.
type analyzerIDs struct {
	m map[*analysis.Analyzer]int
}

// get returns the ID assigned to a. It panics if a was never
// registered, which would indicate a bug in the runner.
func (ids analyzerIDs) get(a *analysis.Analyzer) int {
	id, ok := ids.m[a]
	if !ok {
		panic(fmt.Sprintf("no analyzer ID for %s", a.Name))
	}
	return id
}
|  | ||||
// Fact is the gob-serializable form of a fact for the on-disk cache.
// Path is the objectpath of the object the fact is attached to; an
// empty Path denotes a package-level fact.
type Fact struct {
	Path string
	Fact analysis.Fact
}
|  | ||||
// analysisAction is the unit of work of running one analyzer on one
// package. It carries the fact stores that the analysis.Pass
// callbacks operate on and accumulates the problems the analyzer
// reports.
type analysisAction struct {
	analyzer   *analysis.Analyzer
	analyzerID int
	pkg        *Package
	// package facts exported by this very run (as opposed to
	// inherited ones); these get persisted to the cache
	newPackageFacts []analysis.Fact
	problems        []Problem

	// package facts of pkg and all of its transitive dependencies,
	// populated by makeAnalysisAction
	pkgFacts map[*types.Package][]analysis.Fact
}
|  | ||||
// String returns a human-readable "analyzer @ package" label, used in
// debugging and error output.
func (ac *analysisAction) String() string {
	return fmt.Sprintf("%s @ %s", ac.analyzer, ac.pkg)
}
|  | ||||
| func (ac *analysisAction) allObjectFacts() []analysis.ObjectFact { | ||||
| 	out := make([]analysis.ObjectFact, 0, len(ac.pkg.facts[ac.analyzerID])) | ||||
| 	for obj, facts := range ac.pkg.facts[ac.analyzerID] { | ||||
| 		for _, fact := range facts { | ||||
| 			out = append(out, analysis.ObjectFact{ | ||||
| 				Object: obj, | ||||
| 				Fact:   fact, | ||||
| 			}) | ||||
| 		} | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| func (ac *analysisAction) allPackageFacts() []analysis.PackageFact { | ||||
| 	out := make([]analysis.PackageFact, 0, len(ac.pkgFacts)) | ||||
| 	for pkg, facts := range ac.pkgFacts { | ||||
| 		for _, fact := range facts { | ||||
| 			out = append(out, analysis.PackageFact{ | ||||
| 				Package: pkg, | ||||
| 				Fact:    fact, | ||||
| 			}) | ||||
| 		} | ||||
| 	} | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| func (ac *analysisAction) importObjectFact(obj types.Object, fact analysis.Fact) bool { | ||||
| 	if sanityCheck && len(ac.analyzer.FactTypes) == 0 { | ||||
| 		panic("analysis doesn't export any facts") | ||||
| 	} | ||||
| 	for _, f := range ac.pkg.facts[ac.analyzerID][obj] { | ||||
| 		if reflect.TypeOf(f) == reflect.TypeOf(fact) { | ||||
| 			reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (ac *analysisAction) importPackageFact(pkg *types.Package, fact analysis.Fact) bool { | ||||
| 	if sanityCheck && len(ac.analyzer.FactTypes) == 0 { | ||||
| 		panic("analysis doesn't export any facts") | ||||
| 	} | ||||
| 	for _, f := range ac.pkgFacts[pkg] { | ||||
| 		if reflect.TypeOf(f) == reflect.TypeOf(fact) { | ||||
| 			reflect.ValueOf(fact).Elem().Set(reflect.ValueOf(f).Elem()) | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func (ac *analysisAction) exportObjectFact(obj types.Object, fact analysis.Fact) { | ||||
| 	if sanityCheck && len(ac.analyzer.FactTypes) == 0 { | ||||
| 		panic("analysis doesn't export any facts") | ||||
| 	} | ||||
| 	ac.pkg.facts[ac.analyzerID][obj] = append(ac.pkg.facts[ac.analyzerID][obj], fact) | ||||
| } | ||||
|  | ||||
| func (ac *analysisAction) exportPackageFact(fact analysis.Fact) { | ||||
| 	if sanityCheck && len(ac.analyzer.FactTypes) == 0 { | ||||
| 		panic("analysis doesn't export any facts") | ||||
| 	} | ||||
| 	ac.pkgFacts[ac.pkg.Types] = append(ac.pkgFacts[ac.pkg.Types], fact) | ||||
| 	ac.newPackageFacts = append(ac.newPackageFacts, fact) | ||||
| } | ||||
|  | ||||
| func (ac *analysisAction) report(pass *analysis.Pass, d analysis.Diagnostic) { | ||||
| 	p := Problem{ | ||||
| 		Pos:     DisplayPosition(pass.Fset, d.Pos), | ||||
| 		End:     DisplayPosition(pass.Fset, d.End), | ||||
| 		Message: d.Message, | ||||
| 		Check:   pass.Analyzer.Name, | ||||
| 	} | ||||
| 	ac.problems = append(ac.problems, p) | ||||
| } | ||||
|  | ||||
| func (r *Runner) runAnalysis(ac *analysisAction) (ret interface{}, err error) { | ||||
| 	ac.pkg.resultsMu.Lock() | ||||
| 	res := ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] | ||||
| 	if res != nil { | ||||
| 		ac.pkg.resultsMu.Unlock() | ||||
| 		<-res.ready | ||||
| 		return res.v, res.err | ||||
| 	} else { | ||||
| 		res = &result{ | ||||
| 			ready: make(chan struct{}), | ||||
| 		} | ||||
| 		ac.pkg.results[r.analyzerIDs.get(ac.analyzer)] = res | ||||
| 		ac.pkg.resultsMu.Unlock() | ||||
|  | ||||
| 		defer func() { | ||||
| 			res.v = ret | ||||
| 			res.err = err | ||||
| 			close(res.ready) | ||||
| 		}() | ||||
|  | ||||
| 		pass := new(analysis.Pass) | ||||
| 		*pass = analysis.Pass{ | ||||
| 			Analyzer: ac.analyzer, | ||||
| 			Fset:     ac.pkg.Fset, | ||||
| 			Files:    ac.pkg.Syntax, | ||||
| 			// type information may be nil or may be populated. if it is | ||||
| 			// nil, it will get populated later. | ||||
| 			Pkg:               ac.pkg.Types, | ||||
| 			TypesInfo:         ac.pkg.TypesInfo, | ||||
| 			TypesSizes:        ac.pkg.TypesSizes, | ||||
| 			ResultOf:          map[*analysis.Analyzer]interface{}{}, | ||||
| 			ImportObjectFact:  ac.importObjectFact, | ||||
| 			ImportPackageFact: ac.importPackageFact, | ||||
| 			ExportObjectFact:  ac.exportObjectFact, | ||||
| 			ExportPackageFact: ac.exportPackageFact, | ||||
| 			Report: func(d analysis.Diagnostic) { | ||||
| 				ac.report(pass, d) | ||||
| 			}, | ||||
| 			AllObjectFacts:  ac.allObjectFacts, | ||||
| 			AllPackageFacts: ac.allPackageFacts, | ||||
| 		} | ||||
|  | ||||
| 		if !ac.pkg.initial { | ||||
| 			// Don't report problems in dependencies | ||||
| 			pass.Report = func(analysis.Diagnostic) {} | ||||
| 		} | ||||
| 		return r.runAnalysisUser(pass, ac) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (r *Runner) loadCachedFacts(a *analysis.Analyzer, pkg *Package) ([]Fact, bool) { | ||||
| 	if len(a.FactTypes) == 0 { | ||||
| 		return nil, true | ||||
| 	} | ||||
|  | ||||
| 	var facts []Fact | ||||
| 	// Look in the cache for facts | ||||
| 	aID, err := passActionID(pkg, a) | ||||
| 	if err != nil { | ||||
| 		return nil, false | ||||
| 	} | ||||
| 	aID = cache.Subkey(aID, "facts") | ||||
| 	b, _, err := r.cache.GetBytes(aID) | ||||
| 	if err != nil { | ||||
| 		// No cached facts, analyse this package like a user-provided one, but ignore diagnostics | ||||
| 		return nil, false | ||||
| 	} | ||||
|  | ||||
| 	if err := gob.NewDecoder(bytes.NewReader(b)).Decode(&facts); err != nil { | ||||
| 		// Cached facts are broken, analyse this package like a user-provided one, but ignore diagnostics | ||||
| 		return nil, false | ||||
| 	} | ||||
| 	return facts, true | ||||
| } | ||||
|  | ||||
// dependencyError wraps an error that occurred while running a
// required analyzer; dep names the failing dependency.
type dependencyError struct {
	dep string
	err error
}
|  | ||||
| func (err dependencyError) nested() dependencyError { | ||||
| 	if o, ok := err.err.(dependencyError); ok { | ||||
| 		return o.nested() | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| func (err dependencyError) Error() string { | ||||
| 	if o, ok := err.err.(dependencyError); ok { | ||||
| 		return o.Error() | ||||
| 	} | ||||
| 	return fmt.Sprintf("error running dependency %s: %s", err.dep, err.err) | ||||
| } | ||||
|  | ||||
// makeAnalysisAction prepares an analysisAction for running a on pkg.
// For fact-exporting analyzers it pre-merges the package facts of pkg
// and all of its transitive dependencies into ac.pkgFacts.
func (r *Runner) makeAnalysisAction(a *analysis.Analyzer, pkg *Package) *analysisAction {
	aid := r.analyzerIDs.get(a)
	ac := &analysisAction{
		analyzer:   a,
		analyzerID: aid,
		pkg:        pkg,
	}

	if len(a.FactTypes) == 0 {
		return ac
	}

	// Merge all package facts of dependencies
	ac.pkgFacts = map[*types.Package][]analysis.Fact{}
	seen := map[*Package]struct{}{}
	var dfs func(*Package)
	dfs = func(pkg *Package) {
		if _, ok := seen[pkg]; ok {
			return
		}
		seen[pkg] = struct{}{}
		s := pkg.pkgFacts[aid]
		// Full slice expression caps the stored slice so that appends
		// through ac.pkgFacts can't write into the package's own fact
		// storage.
		ac.pkgFacts[pkg.Types] = s[0:len(s):len(s)]
		for _, imp := range pkg.Imports {
			dfs(imp)
		}
	}
	dfs(pkg)

	return ac
}
|  | ||||
// injectedAnalyses are analyses that we always want to run, even if
// they're not being run explicitly or as dependencies. They are
// necessary for the inner workings of the runner (see processPkg).
var injectedAnalyses = []*analysis.Analyzer{facts.Generated, config.Analyzer}
|  | ||||
// runAnalysisUser runs ac's analyzer on a package that was loaded
// from source. It first runs (and memoizes) all required analyzers,
// then the analyzer itself, and finally persists any newly produced
// facts to the on-disk cache.
func (r *Runner) runAnalysisUser(pass *analysis.Pass, ac *analysisAction) (interface{}, error) {
	if !ac.pkg.fromSource {
		panic(fmt.Sprintf("internal error: %s was not loaded from source", ac.pkg))
	}

	// User-provided package, analyse it
	// First analyze it with dependencies
	for _, req := range ac.analyzer.Requires {
		acReq := r.makeAnalysisAction(req, ac.pkg)
		ret, err := r.runAnalysis(acReq)
		if err != nil {
			// We couldn't run a dependency, no point in going on
			return nil, dependencyError{req.Name, err}
		}

		pass.ResultOf[req] = ret
	}

	// Then with this analyzer
	ret, err := ac.analyzer.Run(pass)
	if err != nil {
		return nil, err
	}

	if len(ac.analyzer.FactTypes) > 0 {
		// Merge new facts into the package and persist them.
		var facts []Fact
		for _, fact := range ac.newPackageFacts {
			id := r.analyzerIDs.get(ac.analyzer)
			ac.pkg.pkgFacts[id] = append(ac.pkg.pkgFacts[id], fact)
			// An empty path marks a package-level fact.
			facts = append(facts, Fact{"", fact})
		}
		for obj, afacts := range ac.pkg.facts[ac.analyzerID] {
			// Only persist facts about this package's own objects;
			// dependencies persist their own facts.
			if obj.Pkg() != ac.pkg.Package.Types {
				continue
			}
			path, err := objectpath.For(obj)
			if err != nil {
				// No stable path to this object, so it can't be
				// re-resolved from export data; skip it.
				continue
			}
			for _, fact := range afacts {
				facts = append(facts, Fact{string(path), fact})
			}
		}

		buf := &bytes.Buffer{}
		if err := gob.NewEncoder(buf).Encode(facts); err != nil {
			return nil, err
		}
		aID, err := passActionID(ac.pkg, ac.analyzer)
		if err != nil {
			return nil, err
		}
		aID = cache.Subkey(aID, "facts")
		if err := r.cache.PutBytes(aID, buf.Bytes()); err != nil {
			return nil, err
		}
	}

	return ret, nil
}
|  | ||||
| func NewRunner(stats *Stats) (*Runner, error) { | ||||
| 	cache, err := cache.Default() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return &Runner{ | ||||
| 		cache: cache, | ||||
| 		stats: stats, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
// Run loads packages corresponding to patterns and analyses them with
// analyzers. It returns the loaded packages, which contain reported
// diagnostics as well as extracted ignore directives.
//
// Note that diagnostics have not been filtered at this point yet, to
// accommodate cumulative analyses that require additional steps to
// produce diagnostics.
func (r *Runner) Run(cfg *packages.Config, patterns []string, analyzers []*analysis.Analyzer, hasCumulative bool) ([]*Package, error) {
	// Assign dense IDs to every analyzer (explicit, required and
	// injected) and register their fact types with gob.
	r.analyzerIDs = analyzerIDs{m: map[*analysis.Analyzer]int{}}
	id := 0
	seen := map[*analysis.Analyzer]struct{}{}
	var dfs func(a *analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if _, ok := seen[a]; ok {
			return
		}
		seen[a] = struct{}{}
		r.analyzerIDs.m[a] = id
		id++
		for _, f := range a.FactTypes {
			gob.Register(f)
		}
		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		if v := a.Flags.Lookup("go"); v != nil {
			v.Value.Set(fmt.Sprintf("1.%d", r.goVersion))
		}
		dfs(a)
	}
	for _, a := range injectedAnalyses {
		dfs(a)
	}

	var dcfg packages.Config
	if cfg != nil {
		dcfg = *cfg
	}

	// Build the package dependency graph.
	atomic.StoreUint32(&r.stats.State, StateGraph)
	initialPkgs, err := r.ld.Graph(dcfg, patterns...)
	if err != nil {
		return nil, err
	}

	defer r.cache.Trim()

	// Wrap every packages.Package in our Package, wiring up imports
	// and reference counts. Visit processes dependencies before
	// dependents, so m[v] below is already populated.
	var allPkgs []*Package
	m := map[*packages.Package]*Package{}
	packages.Visit(initialPkgs, nil, func(l *packages.Package) {
		m[l] = &Package{
			Package:  l,
			results:  make([]*result, len(r.analyzerIDs.m)),
			facts:    make([]map[types.Object][]analysis.Fact, len(r.analyzerIDs.m)),
			pkgFacts: make([][]analysis.Fact, len(r.analyzerIDs.m)),
			done:     make(chan struct{}),
			// every package needs itself
			dependents:    1,
			canClearTypes: !hasCumulative,
		}
		allPkgs = append(allPkgs, m[l])
		for i := range m[l].facts {
			m[l].facts[i] = map[types.Object][]analysis.Fact{}
		}
		for _, err := range l.Errors {
			m[l].errs = append(m[l].errs, err)
		}
		for _, v := range l.Imports {
			m[v].dependents++
			m[l].Imports = append(m[l].Imports, m[v])
		}

		m[l].hash, err = packageHash(m[l])
		if err != nil {
			m[l].errs = append(m[l].errs, err)
		}
	})

	pkgs := make([]*Package, len(initialPkgs))
	for i, l := range initialPkgs {
		pkgs[i] = m[l]
		pkgs[i].initial = true
	}

	atomic.StoreUint32(&r.stats.InitialPackages, uint32(len(initialPkgs)))
	atomic.StoreUint32(&r.stats.TotalPackages, uint32(len(allPkgs)))
	atomic.StoreUint32(&r.stats.State, StateProcessing)

	// One goroutine per package; ordering and parallelism limits are
	// handled inside processPkg (via pkg.done and r.loadSem).
	var wg sync.WaitGroup
	wg.Add(len(allPkgs))
	r.loadSem = make(chan struct{}, runtime.GOMAXPROCS(-1))
	atomic.StoreUint32(&r.stats.TotalWorkers, uint32(cap(r.loadSem)))
	for _, pkg := range allPkgs {
		pkg := pkg
		go func() {
			r.processPkg(pkg, analyzers)

			if pkg.initial {
				atomic.AddUint32(&r.stats.ProcessedInitialPackages, 1)
			}
			atomic.AddUint32(&r.stats.Problems, uint32(len(pkg.problems)))
			wg.Done()
		}()
	}
	wg.Wait()

	return pkgs, nil
}
|  | ||||
var posRe = regexp.MustCompile(`^(.+?):(\d+)(?::(\d+)?)?`)

// parsePos parses a file:line[:col] position of the kind found in
// packages.Error. It returns the parsed position and the number of
// bytes of pos that the position prefix occupied. "-" and the empty
// string denote an unknown position and yield the zero Position.
func parsePos(pos string) (token.Position, int, error) {
	if pos == "" || pos == "-" {
		return token.Position{}, 0, nil
	}
	m := posRe.FindStringSubmatch(pos)
	if m == nil {
		return token.Position{}, 0, fmt.Errorf("malformed position %q", pos)
	}
	// The column group may be absent; Atoi("") yields 0, which is the
	// desired "unknown column" value, so its error is ignored.
	line, _ := strconv.Atoi(m[2])
	col, _ := strconv.Atoi(m[3])
	p := token.Position{
		Filename: m[1],
		Line:     line,
		Column:   col,
	}
	return p, len(m[0]), nil
}
|  | ||||
// loadPkg loads a Go package. If the package is in the set of initial
// packages, it will be loaded from source, otherwise it will be
// loaded from export data. In the case that the package was loaded
// from export data, cached facts will also be loaded.
//
// Currently, only cached facts for this package will be loaded, not
// for any of its dependencies.
func (r *Runner) loadPkg(pkg *Package, analyzers []*analysis.Analyzer) error {
	if pkg.Types != nil {
		panic(fmt.Sprintf("internal error: %s has already been loaded", pkg.Package))
	}

	// Load type information
	if pkg.initial {
		// Load package from source
		pkg.fromSource = true
		return r.ld.LoadFromSource(pkg.Package)
	}

	// Load package from export data
	if err := r.ld.LoadFromExport(pkg.Package); err != nil {
		// We asked Go to give us up to date export data, yet
		// we can't load it. There must be something wrong.
		//
		// Attempt loading from source. This should fail (because
		// otherwise there would be export data); we just want to
		// get the compile errors. If loading from source succeeds
		// we discard the result, anyway. Otherwise we'll fail
		// when trying to reload from export data later.
		//
		// FIXME(dh): we no longer reload from export data, so
		// theoretically we should be able to continue
		pkg.fromSource = true
		if err := r.ld.LoadFromSource(pkg.Package); err != nil {
			return err
		}
		// Make sure this package can't be imported successfully
		pkg.Package.Errors = append(pkg.Package.Errors, packages.Error{
			Pos:  "-",
			Msg:  fmt.Sprintf("could not load export data: %s", err),
			Kind: packages.ParseError,
		})
		return fmt.Errorf("could not load export data: %s", err)
	}

	// Walk the analyzer graph and pull cached facts for every
	// fact-producing analyzer. If any are missing or unreadable, fall
	// back to analysing the package from source.
	failed := false
	seen := make([]bool, len(r.analyzerIDs.m))
	var dfs func(*analysis.Analyzer)
	dfs = func(a *analysis.Analyzer) {
		if seen[r.analyzerIDs.get(a)] {
			return
		}
		seen[r.analyzerIDs.get(a)] = true

		if len(a.FactTypes) > 0 {
			facts, ok := r.loadCachedFacts(a, pkg)
			if !ok {
				failed = true
				return
			}

			for _, f := range facts {
				if f.Path == "" {
					// This is a package fact
					pkg.pkgFacts[r.analyzerIDs.get(a)] = append(pkg.pkgFacts[r.analyzerIDs.get(a)], f.Fact)
					continue
				}
				obj, err := objectpath.Object(pkg.Types, objectpath.Path(f.Path))
				if err != nil {
					// Be lenient about these errors. For example, when
					// analysing io/ioutil from source, we may get a fact
					// for methods on the devNull type, and objectpath
					// will happily create a path for them. However, when
					// we later load io/ioutil from export data, the path
					// no longer resolves.
					//
					// If an exported type embeds the unexported type,
					// then (part of) the unexported type will become part
					// of the type information and our path will resolve
					// again.
					continue
				}
				pkg.facts[r.analyzerIDs.get(a)][obj] = append(pkg.facts[r.analyzerIDs.get(a)][obj], f.Fact)
			}
		}

		for _, req := range a.Requires {
			dfs(req)
		}
	}
	for _, a := range analyzers {
		dfs(a)
	}

	if failed {
		pkg.fromSource = true
		// XXX we added facts to the maps, we need to get rid of those
		return r.ld.LoadFromSource(pkg.Package)
	}

	return nil
}
|  | ||||
// analysisError records the failure of one analyzer on one package.
type analysisError struct {
	analyzer *analysis.Analyzer
	pkg      *Package
	err      error
}

// Error implements the error interface.
func (err analysisError) Error() string {
	return fmt.Sprintf("error running analyzer %s on %s: %s", err.analyzer, err.pkg, err.err)
}
|  | ||||
// processPkg processes a package. This involves loading the package,
// either from export data or from source. For packages loaded from
// source, the provided analyzers will be run on the package.
func (r *Runner) processPkg(pkg *Package, analyzers []*analysis.Analyzer) {
	defer func() {
		// Clear information we no longer need. Make sure to do this
		// when returning from processPkg so that we clear
		// dependencies, not just initial packages.
		pkg.TypesInfo = nil
		pkg.Syntax = nil
		pkg.results = nil

		atomic.AddUint32(&r.stats.ProcessedPackages, 1)
		pkg.decUse()
		close(pkg.done)
	}()

	// Ensure all packages have the generated map and config. This is
	// required by internals of the runner. Analyses that themselves
	// make use of either have an explicit dependency so that other
	// runners work correctly, too.
	analyzers = append(analyzers[0:len(analyzers):len(analyzers)], injectedAnalyses...)

	if len(pkg.errs) != 0 {
		return
	}

	// Wait for all dependencies to finish processing; bail out if any
	// of them failed.
	for _, imp := range pkg.Imports {
		<-imp.done
		if len(imp.errs) > 0 {
			if imp.initial {
				// Don't print the error of the dependency since it's
				// an initial package and we're already printing the
				// error.
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s", imp, pkg))
			} else {
				var s string
				for _, err := range imp.errs {
					s += "\n\t" + err.Error()
				}
				pkg.errs = append(pkg.errs, fmt.Errorf("could not analyze dependency %s of %s: %s", imp, pkg, s))
			}
			return
		}
	}
	if pkg.PkgPath == "unsafe" {
		pkg.Types = types.Unsafe
		return
	}

	// Bound the number of packages being loaded and analysed at once;
	// see the parallelism notes at the top of the file.
	r.loadSem <- struct{}{}
	atomic.AddUint32(&r.stats.ActiveWorkers, 1)
	defer func() {
		<-r.loadSem
		atomic.AddUint32(&r.stats.ActiveWorkers, ^uint32(0))
	}()
	if err := r.loadPkg(pkg, analyzers); err != nil {
		pkg.errs = append(pkg.errs, err)
		return
	}

	// A package's object facts is the union of all of its dependencies.
	for _, imp := range pkg.Imports {
		for ai, m := range imp.facts {
			for obj, facts := range m {
				// Cap the slices so appends during analysis don't
				// write into the dependency's fact storage.
				pkg.facts[ai][obj] = facts[0:len(facts):len(facts)]
			}
		}
	}

	if !pkg.fromSource {
		// Nothing left to do for the package.
		return
	}

	// Run analyses on initial packages and those missing facts
	var wg sync.WaitGroup
	wg.Add(len(analyzers))
	errs := make([]error, len(analyzers))
	var acs []*analysisAction
	for i, a := range analyzers {
		i := i
		a := a
		ac := r.makeAnalysisAction(a, pkg)
		acs = append(acs, ac)
		go func() {
			defer wg.Done()
			// Only initial packages and packages with missing
			// facts will have been loaded from source.
			if pkg.initial || r.hasFacts(a) {
				if _, err := r.runAnalysis(ac); err != nil {
					errs[i] = analysisError{a, pkg, err}
					return
				}
			}
		}()
	}
	wg.Wait()

	// Group failures caused by the same failing dependency so the
	// user sees one aggregated message instead of one per analyzer.
	depErrors := map[dependencyError]int{}
	for _, err := range errs {
		if err == nil {
			continue
		}
		switch err := err.(type) {
		case analysisError:
			switch err := err.err.(type) {
			case dependencyError:
				depErrors[err.nested()]++
			default:
				pkg.errs = append(pkg.errs, err)
			}
		default:
			pkg.errs = append(pkg.errs, err)
		}
	}
	for err, count := range depErrors {
		pkg.errs = append(pkg.errs,
			fmt.Errorf("could not run %s@%s, preventing %d analyzers from running: %s", err.dep, pkg, count, err.err))
	}

	// We can't process ignores at this point because `unused` needs
	// to see more than one package to make its decision.
	ignores, problems := parseDirectives(pkg.Package)
	pkg.ignores = append(pkg.ignores, ignores...)
	pkg.problems = append(pkg.problems, problems...)
	for _, ac := range acs {
		pkg.problems = append(pkg.problems, ac.problems...)
	}

	if pkg.initial {
		// Only initial packages have these analyzers run, and only
		// initial packages need these.
		if pkg.results[r.analyzerIDs.get(config.Analyzer)].v != nil {
			pkg.cfg = pkg.results[r.analyzerIDs.get(config.Analyzer)].v.(*config.Config)
		}
		pkg.gen = pkg.results[r.analyzerIDs.get(facts.Generated)].v.(map[string]facts.Generator)
	}

	// In a previous version of the code, we would throw away all type
	// information and reload it from export data. That was
	// nonsensical. The *types.Package doesn't keep any information
	// live that export data wouldn't also. We only need to discard
	// the AST and the TypesInfo maps; that happens after we return
	// from processPkg.
}
|  | ||||
| // hasFacts reports whether an analysis exports any facts. An analysis | ||||
| // that has a transitive dependency that exports facts is considered | ||||
| // to be exporting facts. | ||||
| func (r *Runner) hasFacts(a *analysis.Analyzer) bool { | ||||
| 	ret := false | ||||
| 	seen := make([]bool, len(r.analyzerIDs.m)) | ||||
| 	var dfs func(*analysis.Analyzer) | ||||
| 	dfs = func(a *analysis.Analyzer) { | ||||
| 		if seen[r.analyzerIDs.get(a)] { | ||||
| 			return | ||||
| 		} | ||||
| 		seen[r.analyzerIDs.get(a)] = true | ||||
| 		if len(a.FactTypes) > 0 { | ||||
| 			ret = true | ||||
| 		} | ||||
| 		for _, req := range a.Requires { | ||||
| 			if ret { | ||||
| 				break | ||||
| 			} | ||||
| 			dfs(req) | ||||
| 		} | ||||
| 	} | ||||
| 	dfs(a) | ||||
| 	return ret | ||||
| } | ||||
|  | ||||
// parseDirective splits a "//lint:" comment into its command and
// arguments. Comments without the prefix yield an empty command and
// nil arguments.
func parseDirective(s string) (cmd string, args []string) {
	const prefix = "//lint:"
	if !strings.HasPrefix(s, prefix) {
		return "", nil
	}
	parts := strings.Split(s[len(prefix):], " ")
	return parts[0], parts[1:]
}
|  | ||||
// parseDirectives extracts all linter directives from the source
// files of the package. Malformed directives are returned as problems.
func parseDirectives(pkg *packages.Package) ([]Ignore, []Problem) {
	var ignores []Ignore
	var problems []Problem

	for _, f := range pkg.Syntax {
		// Cheap pre-scan: only build a comment map for files that
		// actually contain a linter directive.
		found := false
	commentLoop:
		for _, cg := range f.Comments {
			for _, c := range cg.List {
				if strings.Contains(c.Text, "//lint:") {
					found = true
					break commentLoop
				}
			}
		}
		if !found {
			continue
		}
		cm := ast.NewCommentMap(pkg.Fset, f, f.Comments)
		for node, cgs := range cm {
			for _, cg := range cgs {
				for _, c := range cg.List {
					if !strings.HasPrefix(c.Text, "//lint:") {
						continue
					}
					cmd, args := parseDirective(c.Text)
					switch cmd {
					case "ignore", "file-ignore":
						// Both forms require a check list and a reason.
						if len(args) < 2 {
							p := Problem{
								Pos:      DisplayPosition(pkg.Fset, c.Pos()),
								Message:  "malformed linter directive; missing the required reason field?",
								Severity: Error,
								Check:    "compile",
							}
							problems = append(problems, p)
							continue
						}
					default:
						// unknown directive, ignore
						continue
					}
					checks := strings.Split(args[0], ",")
					pos := DisplayPosition(pkg.Fset, node.Pos())
					var ig Ignore
					switch cmd {
					case "ignore":
						ig = &LineIgnore{
							File:   pos.Filename,
							Line:   pos.Line,
							Checks: checks,
							Pos:    c.Pos(),
						}
					case "file-ignore":
						ig = &FileIgnore{
							File:   pos.Filename,
							Checks: checks,
						}
					}
					ignores = append(ignores, ig)
				}
			}
		}
	}

	return ignores, problems
}
|  | ||||
| // packageHash computes a package's hash. The hash is based on all Go | ||||
| // files that make up the package, as well as the hashes of imported | ||||
| // packages. | ||||
| func packageHash(pkg *Package) (string, error) { | ||||
| 	key := cache.NewHash("package hash") | ||||
| 	fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) | ||||
| 	for _, f := range pkg.CompiledGoFiles { | ||||
| 		h, err := cache.FileHash(f) | ||||
| 		if err != nil { | ||||
| 			return "", err | ||||
| 		} | ||||
| 		fmt.Fprintf(key, "file %s %x\n", f, h) | ||||
| 	} | ||||
|  | ||||
| 	imps := make([]*Package, len(pkg.Imports)) | ||||
| 	copy(imps, pkg.Imports) | ||||
| 	sort.Slice(imps, func(i, j int) bool { | ||||
| 		return imps[i].PkgPath < imps[j].PkgPath | ||||
| 	}) | ||||
| 	for _, dep := range imps { | ||||
| 		if dep.PkgPath == "unsafe" { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		fmt.Fprintf(key, "import %s %s\n", dep.PkgPath, dep.hash) | ||||
| 	} | ||||
| 	h := key.Sum() | ||||
| 	return hex.EncodeToString(h[:]), nil | ||||
| } | ||||
|  | ||||
| // passActionID computes an ActionID for an analysis pass. | ||||
| func passActionID(pkg *Package, analyzer *analysis.Analyzer) (cache.ActionID, error) { | ||||
| 	key := cache.NewHash("action ID") | ||||
| 	fmt.Fprintf(key, "pkgpath %s\n", pkg.PkgPath) | ||||
| 	fmt.Fprintf(key, "pkghash %s\n", pkg.hash) | ||||
| 	fmt.Fprintf(key, "analyzer %s\n", analyzer.Name) | ||||
|  | ||||
| 	return key.Sum(), nil | ||||
| } | ||||
							
								
								
									
										20
									
								
								vendor/honnef.co/go/tools/lint/stats.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								vendor/honnef.co/go/tools/lint/stats.go
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,20 @@ | ||||
| package lint | ||||
|  | ||||
// States a lint run progresses through, recorded in Stats.State.
// Values are identical to the original explicit constants (0–3).
const (
	StateInitializing = iota // 0
	StateGraph               // 1
	StateProcessing          // 2
	StateCumulative          // 3
)
|  | ||||
// Stats tracks the progress of a lint run.
//
// NOTE(review): all fields are uint32, presumably so callers can read
// and update them with sync/atomic while workers run concurrently —
// confirm at the call sites.
type Stats struct {
	// State holds one of the State* constants.
	State uint32

	InitialPackages          uint32 // packages initially requested (inferred from name; verify)
	TotalPackages            uint32 // total packages including dependencies (inferred from name; verify)
	ProcessedPackages        uint32 // packages processed so far
	ProcessedInitialPackages uint32 // initial packages processed so far
	Problems                 uint32 // problems reported so far
	ActiveWorkers            uint32 // workers currently busy
	TotalWorkers             uint32 // total workers available
}
							
								
								
									
										27
									
								
								vendor/honnef.co/go/tools/loader/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/honnef.co/go/tools/loader/BUILD
									
									
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
# Bazel build rules for the vendored honnef.co/go/tools/loader package.
# The "automanaged" tags mark targets maintained by Kubernetes' BUILD
# file generation tooling; avoid editing those targets by hand.
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "go_default_library",
    srcs = ["loader.go"],
    importmap = "k8s.io/kubernetes/vendor/honnef.co/go/tools/loader",
    importpath = "honnef.co/go/tools/loader",
    visibility = ["//visibility:public"],
    deps = [
        "//vendor/golang.org/x/tools/go/gcexportdata:go_default_library",
        "//vendor/golang.org/x/tools/go/packages:go_default_library",
    ],
)

filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
    visibility = ["//visibility:public"],
)
Some files were not shown because too many files have changed in this diff Show More
		Reference in New Issue
	
	Block a user
	 Tim Allclair
					Tim Allclair