mirror of
https://github.com/optim-enterprises-bv/kubernetes.git
synced 2025-11-19 07:55:09 +00:00
Bump cAdvisor (and dependencies) godeps version
This commit is contained in:
8
Godeps/.license_file_state
generated
8
Godeps/.license_file_state
generated
@@ -99,10 +99,6 @@ raw.githubusercontent.com/evanphx/json-patch/master/NOTICE
|
|||||||
raw.githubusercontent.com/evanphx/json-patch/master/NOTICE.txt
|
raw.githubusercontent.com/evanphx/json-patch/master/NOTICE.txt
|
||||||
raw.githubusercontent.com/evanphx/json-patch/master/README
|
raw.githubusercontent.com/evanphx/json-patch/master/README
|
||||||
raw.githubusercontent.com/evanphx/json-patch/master/README.md
|
raw.githubusercontent.com/evanphx/json-patch/master/README.md
|
||||||
raw.githubusercontent.com/fsouza/go-dockerclient/master/NOTICE
|
|
||||||
raw.githubusercontent.com/fsouza/go-dockerclient/master/NOTICE.txt
|
|
||||||
raw.githubusercontent.com/fsouza/go-dockerclient/master/README
|
|
||||||
raw.githubusercontent.com/fsouza/go-dockerclient/master/README.md
|
|
||||||
raw.githubusercontent.com/garyburd/redigo/master/LICENSE.code
|
raw.githubusercontent.com/garyburd/redigo/master/LICENSE.code
|
||||||
raw.githubusercontent.com/garyburd/redigo/master/LICENSE.txt
|
raw.githubusercontent.com/garyburd/redigo/master/LICENSE.txt
|
||||||
raw.githubusercontent.com/garyburd/redigo/master/LICENSE.md
|
raw.githubusercontent.com/garyburd/redigo/master/LICENSE.md
|
||||||
@@ -265,6 +261,10 @@ raw.githubusercontent.com/rackspace/gophercloud/master/NOTICE
|
|||||||
raw.githubusercontent.com/rackspace/gophercloud/master/NOTICE.txt
|
raw.githubusercontent.com/rackspace/gophercloud/master/NOTICE.txt
|
||||||
raw.githubusercontent.com/rackspace/gophercloud/master/README
|
raw.githubusercontent.com/rackspace/gophercloud/master/README
|
||||||
raw.githubusercontent.com/rackspace/gophercloud/master/README.md
|
raw.githubusercontent.com/rackspace/gophercloud/master/README.md
|
||||||
|
raw.githubusercontent.com/robfig/cron/master/NOTICE
|
||||||
|
raw.githubusercontent.com/robfig/cron/master/NOTICE.txt
|
||||||
|
raw.githubusercontent.com/robfig/cron/master/README
|
||||||
|
raw.githubusercontent.com/robfig/cron/master/README.md
|
||||||
raw.githubusercontent.com/russross/blackfriday/master/NOTICE
|
raw.githubusercontent.com/russross/blackfriday/master/NOTICE
|
||||||
raw.githubusercontent.com/russross/blackfriday/master/NOTICE.txt
|
raw.githubusercontent.com/russross/blackfriday/master/NOTICE.txt
|
||||||
raw.githubusercontent.com/russross/blackfriday/master/README
|
raw.githubusercontent.com/russross/blackfriday/master/README
|
||||||
|
|||||||
243
Godeps/Godeps.json
generated
243
Godeps/Godeps.json
generated
@@ -215,7 +215,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/codegangsta/negroni",
|
"ImportPath": "github.com/codegangsta/negroni",
|
||||||
"Comment": "v0.1.0-62-g8d75e11",
|
"Comment": "v0.1-62-g8d75e11",
|
||||||
"Rev": "8d75e11374a1928608c906fe745b538483e7aeb2"
|
"Rev": "8d75e11374a1928608c906fe745b538483e7aeb2"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
@@ -742,78 +742,6 @@
|
|||||||
"ImportPath": "github.com/evanphx/json-patch",
|
"ImportPath": "github.com/evanphx/json-patch",
|
||||||
"Rev": "7dd4489c2eb6073e5a9d7746c3274c5b5f0387df"
|
"Rev": "7dd4489c2eb6073e5a9d7746c3274c5b5f0387df"
|
||||||
},
|
},
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
|
||||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix",
|
|
||||||
"Rev": "bf97c77db7c945cbcdbf09d56c6f87a66f54537b"
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/garyburd/redigo/internal",
|
"ImportPath": "github.com/garyburd/redigo/internal",
|
||||||
"Rev": "535138d7bcd717d6531c701ef5933d98b1866257"
|
"Rev": "535138d7bcd717d6531c701ef5933d98b1866257"
|
||||||
@@ -978,173 +906,203 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/api",
|
"ImportPath": "github.com/google/cadvisor/api",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/cache/memory",
|
"ImportPath": "github.com/google/cadvisor/cache/memory",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/collector",
|
"ImportPath": "github.com/google/cadvisor/collector",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/container",
|
"ImportPath": "github.com/google/cadvisor/container",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/container/common",
|
"ImportPath": "github.com/google/cadvisor/container/common",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/container/docker",
|
"ImportPath": "github.com/google/cadvisor/container/docker",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/container/libcontainer",
|
"ImportPath": "github.com/google/cadvisor/container/libcontainer",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/container/raw",
|
"ImportPath": "github.com/google/cadvisor/container/raw",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/container/rkt",
|
"ImportPath": "github.com/google/cadvisor/container/rkt",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/container/systemd",
|
"ImportPath": "github.com/google/cadvisor/container/systemd",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/google/cadvisor/devicemapper",
|
||||||
|
"Comment": "v0.23.2",
|
||||||
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/events",
|
"ImportPath": "github.com/google/cadvisor/events",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/fs",
|
"ImportPath": "github.com/google/cadvisor/fs",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/healthz",
|
"ImportPath": "github.com/google/cadvisor/healthz",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/http",
|
"ImportPath": "github.com/google/cadvisor/http",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/http/mux",
|
"ImportPath": "github.com/google/cadvisor/http/mux",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/info/v1",
|
"ImportPath": "github.com/google/cadvisor/info/v1",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/info/v1/test",
|
"ImportPath": "github.com/google/cadvisor/info/v1/test",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/info/v2",
|
"ImportPath": "github.com/google/cadvisor/info/v2",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/google/cadvisor/machine",
|
||||||
|
"Comment": "v0.23.2",
|
||||||
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/manager",
|
"ImportPath": "github.com/google/cadvisor/manager",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/google/cadvisor/manager/watcher",
|
||||||
|
"Comment": "v0.23.2",
|
||||||
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/google/cadvisor/manager/watcher/raw",
|
||||||
|
"Comment": "v0.23.2",
|
||||||
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/google/cadvisor/manager/watcher/rkt",
|
||||||
|
"Comment": "v0.23.2",
|
||||||
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/metrics",
|
"ImportPath": "github.com/google/cadvisor/metrics",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/pages",
|
"ImportPath": "github.com/google/cadvisor/pages",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/pages/static",
|
"ImportPath": "github.com/google/cadvisor/pages/static",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/storage",
|
"ImportPath": "github.com/google/cadvisor/storage",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/summary",
|
"ImportPath": "github.com/google/cadvisor/summary",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/utils",
|
"ImportPath": "github.com/google/cadvisor/utils",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/utils/cloudinfo",
|
"ImportPath": "github.com/google/cadvisor/utils/cloudinfo",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/utils/cpuload",
|
"ImportPath": "github.com/google/cadvisor/utils/cpuload",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink",
|
"ImportPath": "github.com/google/cadvisor/utils/cpuload/netlink",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/utils/machine",
|
"ImportPath": "github.com/google/cadvisor/utils/docker",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/utils/oomparser",
|
"ImportPath": "github.com/google/cadvisor/utils/oomparser",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/utils/sysfs",
|
"ImportPath": "github.com/google/cadvisor/utils/sysfs",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/utils/sysinfo",
|
"ImportPath": "github.com/google/cadvisor/utils/sysinfo",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
|
},
|
||||||
|
{
|
||||||
|
"ImportPath": "github.com/google/cadvisor/utils/tail",
|
||||||
|
"Comment": "v0.23.2",
|
||||||
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/validate",
|
"ImportPath": "github.com/google/cadvisor/validate",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/cadvisor/version",
|
"ImportPath": "github.com/google/cadvisor/version",
|
||||||
"Comment": "v0.23.0",
|
"Comment": "v0.23.2",
|
||||||
"Rev": "750f18e5eac3f6193b354fc14c03d92d4318a0ec"
|
"Rev": "7ddf6eb5d1f84363fbc181a498313a880b12ba07"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/google/gofuzz",
|
"ImportPath": "github.com/google/gofuzz",
|
||||||
@@ -1767,6 +1725,7 @@
|
|||||||
},
|
},
|
||||||
{
|
{
|
||||||
"ImportPath": "github.com/robfig/cron",
|
"ImportPath": "github.com/robfig/cron",
|
||||||
|
"Comment": "v1-16-g0f39cf7",
|
||||||
"Rev": "0f39cf7ebc65a602f45692f9894bd6a193faf8fa"
|
"Rev": "0f39cf7ebc65a602f45692f9894bd6a193faf8fa"
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
|
|||||||
1666
Godeps/LICENSES
generated
1666
Godeps/LICENSES
generated
File diff suppressed because it is too large
Load Diff
6
Godeps/OWNERS
generated
6
Godeps/OWNERS
generated
@@ -1,6 +0,0 @@
|
|||||||
assignees:
|
|
||||||
- davidopp
|
|
||||||
- eparis
|
|
||||||
- lavalamp
|
|
||||||
- quinton-hoole
|
|
||||||
- thockin
|
|
||||||
4
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore
generated
vendored
4
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/.gitignore
generated
vendored
@@ -1,4 +0,0 @@
|
|||||||
testdata/conf_out.ini
|
|
||||||
ini.sublime-project
|
|
||||||
ini.sublime-workspace
|
|
||||||
testdata/conf_reflect.ini
|
|
||||||
191
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE
generated
vendored
191
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/LICENSE
generated
vendored
@@ -1,191 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction, and
|
|
||||||
distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
|
||||||
owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
|
||||||
that control, are controlled by, or are under common control with that entity.
|
|
||||||
For the purposes of this definition, "control" means (i) the power, direct or
|
|
||||||
indirect, to cause the direction or management of such entity, whether by
|
|
||||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
|
||||||
permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications, including
|
|
||||||
but not limited to software source code, documentation source, and configuration
|
|
||||||
files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical transformation or
|
|
||||||
translation of a Source form, including but not limited to compiled object code,
|
|
||||||
generated documentation, and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
|
||||||
available under the License, as indicated by a copyright notice that is included
|
|
||||||
in or attached to the work (an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
|
||||||
is based on (or derived from) the Work and for which the editorial revisions,
|
|
||||||
annotations, elaborations, or other modifications represent, as a whole, an
|
|
||||||
original work of authorship. For the purposes of this License, Derivative Works
|
|
||||||
shall not include works that remain separable from, or merely link (or bind by
|
|
||||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including the original version
|
|
||||||
of the Work and any modifications or additions to that Work or Derivative Works
|
|
||||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
|
||||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
|
||||||
on behalf of the copyright owner. For the purposes of this definition,
|
|
||||||
"submitted" means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems, and
|
|
||||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
|
||||||
the purpose of discussing and improving the Work, but excluding communication
|
|
||||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
|
||||||
owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
|
||||||
of whom a Contribution has been received by Licensor and subsequently
|
|
||||||
incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License.
|
|
||||||
|
|
||||||
Subject to the terms and conditions of this License, each Contributor hereby
|
|
||||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
|
||||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
|
||||||
Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License.
|
|
||||||
|
|
||||||
Subject to the terms and conditions of this License, each Contributor hereby
|
|
||||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
|
||||||
irrevocable (except as stated in this section) patent license to make, have
|
|
||||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
|
||||||
such license applies only to those patent claims licensable by such Contributor
|
|
||||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
|
||||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
|
||||||
submitted. If You institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
|
||||||
Contribution incorporated within the Work constitutes direct or contributory
|
|
||||||
patent infringement, then any patent licenses granted to You under this License
|
|
||||||
for that Work shall terminate as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution.
|
|
||||||
|
|
||||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
|
||||||
in any medium, with or without modifications, and in Source or Object form,
|
|
||||||
provided that You meet the following conditions:
|
|
||||||
|
|
||||||
You must give any other recipients of the Work or Derivative Works a copy of
|
|
||||||
this License; and
|
|
||||||
You must cause any modified files to carry prominent notices stating that You
|
|
||||||
changed the files; and
|
|
||||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
|
||||||
all copyright, patent, trademark, and attribution notices from the Source form
|
|
||||||
of the Work, excluding those notices that do not pertain to any part of the
|
|
||||||
Derivative Works; and
|
|
||||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
|
||||||
Derivative Works that You distribute must include a readable copy of the
|
|
||||||
attribution notices contained within such NOTICE file, excluding those notices
|
|
||||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
|
||||||
following places: within a NOTICE text file distributed as part of the
|
|
||||||
Derivative Works; within the Source form or documentation, if provided along
|
|
||||||
with the Derivative Works; or, within a display generated by the Derivative
|
|
||||||
Works, if and wherever such third-party notices normally appear. The contents of
|
|
||||||
the NOTICE file are for informational purposes only and do not modify the
|
|
||||||
License. You may add Your own attribution notices within Derivative Works that
|
|
||||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
|
||||||
provided that such additional attribution notices cannot be construed as
|
|
||||||
modifying the License.
|
|
||||||
You may add Your own copyright statement to Your modifications and may provide
|
|
||||||
additional or different license terms and conditions for use, reproduction, or
|
|
||||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
|
||||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
|
||||||
with the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions.
|
|
||||||
|
|
||||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
|
||||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
|
||||||
conditions of this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
|
||||||
any separate license agreement you may have executed with Licensor regarding
|
|
||||||
such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks.
|
|
||||||
|
|
||||||
This License does not grant permission to use the trade names, trademarks,
|
|
||||||
service marks, or product names of the Licensor, except as required for
|
|
||||||
reasonable and customary use in describing the origin of the Work and
|
|
||||||
reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
|
||||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
|
||||||
including, without limitation, any warranties or conditions of TITLE,
|
|
||||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
|
||||||
solely responsible for determining the appropriateness of using or
|
|
||||||
redistributing the Work and assume any risks associated with Your exercise of
|
|
||||||
permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability.
|
|
||||||
|
|
||||||
In no event and under no legal theory, whether in tort (including negligence),
|
|
||||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
|
||||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special, incidental,
|
|
||||||
or consequential damages of any character arising as a result of this License or
|
|
||||||
out of the use or inability to use the Work (including but not limited to
|
|
||||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
|
||||||
any and all other commercial damages or losses), even if such Contributor has
|
|
||||||
been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability.
|
|
||||||
|
|
||||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
|
||||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
|
||||||
other liability obligations and/or rights consistent with this License. However,
|
|
||||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
|
||||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
|
||||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason of your
|
|
||||||
accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following boilerplate
|
|
||||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
|
||||||
identifying information. (Don't include the brackets!) The text should be
|
|
||||||
enclosed in the appropriate comment syntax for the file format. We also
|
|
||||||
recommend that a file or class name and description of purpose be included on
|
|
||||||
the same "printed page" as the copyright notice for easier identification within
|
|
||||||
third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
560
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md
generated
vendored
560
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README.md
generated
vendored
@@ -1,560 +0,0 @@
|
|||||||
ini [](https://drone.io/github.com/go-ini/ini/latest) [](http://gocover.io/github.com/go-ini/ini)
|
|
||||||
===
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
Package ini provides INI file read and write functionality in Go.
|
|
||||||
|
|
||||||
[简体中文](README_ZH.md)
|
|
||||||
|
|
||||||
## Feature
|
|
||||||
|
|
||||||
- Load multiple data sources(`[]byte` or file) with overwrites.
|
|
||||||
- Read with recursion values.
|
|
||||||
- Read with parent-child sections.
|
|
||||||
- Read with auto-increment key names.
|
|
||||||
- Read with multiple-line values.
|
|
||||||
- Read with tons of helper methods.
|
|
||||||
- Read and convert values to Go types.
|
|
||||||
- Read and **WRITE** comments of sections and keys.
|
|
||||||
- Manipulate sections, keys and comments with ease.
|
|
||||||
- Keep sections and keys in order as you parse and save.
|
|
||||||
|
|
||||||
## Installation
|
|
||||||
|
|
||||||
go get gopkg.in/ini.v1
|
|
||||||
|
|
||||||
## Getting Started
|
|
||||||
|
|
||||||
### Loading from data sources
|
|
||||||
|
|
||||||
A **Data Source** is either raw data in type `[]byte` or a file name with type `string` and you can load **as many as** data sources you want. Passing other types will simply return an error.
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg, err := ini.Load([]byte("raw data"), "filename")
|
|
||||||
```
|
|
||||||
|
|
||||||
Or start with an empty object:
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg := ini.Empty()
|
|
||||||
```
|
|
||||||
|
|
||||||
When you cannot decide how many data sources to load at the beginning, you are still able to **Append()** them later.
|
|
||||||
|
|
||||||
```go
|
|
||||||
err := cfg.Append("other file", []byte("other raw data"))
|
|
||||||
```
|
|
||||||
|
|
||||||
### Working with sections
|
|
||||||
|
|
||||||
To get a section, you would need to:
|
|
||||||
|
|
||||||
```go
|
|
||||||
section, err := cfg.GetSection("section name")
|
|
||||||
```
|
|
||||||
|
|
||||||
For a shortcut for default section, just give an empty string as name:
|
|
||||||
|
|
||||||
```go
|
|
||||||
section, err := cfg.GetSection("")
|
|
||||||
```
|
|
||||||
|
|
||||||
When you're pretty sure the section exists, following code could make your life easier:
|
|
||||||
|
|
||||||
```go
|
|
||||||
section := cfg.Section("")
|
|
||||||
```
|
|
||||||
|
|
||||||
What happens when the section somehow does not exist? Don't panic, it automatically creates and returns a new section to you.
|
|
||||||
|
|
||||||
To create a new section:
|
|
||||||
|
|
||||||
```go
|
|
||||||
err := cfg.NewSection("new section")
|
|
||||||
```
|
|
||||||
|
|
||||||
To get a list of sections or section names:
|
|
||||||
|
|
||||||
```go
|
|
||||||
sections := cfg.Sections()
|
|
||||||
names := cfg.SectionStrings()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Working with keys
|
|
||||||
|
|
||||||
To get a key under a section:
|
|
||||||
|
|
||||||
```go
|
|
||||||
key, err := cfg.Section("").GetKey("key name")
|
|
||||||
```
|
|
||||||
|
|
||||||
Same rule applies to key operations:
|
|
||||||
|
|
||||||
```go
|
|
||||||
key := cfg.Section("").Key("key name")
|
|
||||||
```
|
|
||||||
|
|
||||||
To create a new key:
|
|
||||||
|
|
||||||
```go
|
|
||||||
err := cfg.Section("").NewKey("name", "value")
|
|
||||||
```
|
|
||||||
|
|
||||||
To get a list of keys or key names:
|
|
||||||
|
|
||||||
```go
|
|
||||||
keys := cfg.Section("").Keys()
|
|
||||||
names := cfg.Section("").KeyStrings()
|
|
||||||
```
|
|
||||||
|
|
||||||
To get a clone hash of keys and corresponding values:
|
|
||||||
|
|
||||||
```go
|
|
||||||
hash := cfg.GetSection("").KeysHash()
|
|
||||||
```
|
|
||||||
|
|
||||||
### Working with values
|
|
||||||
|
|
||||||
To get a string value:
|
|
||||||
|
|
||||||
```go
|
|
||||||
val := cfg.Section("").Key("key name").String()
|
|
||||||
```
|
|
||||||
|
|
||||||
To validate key value on the fly:
|
|
||||||
|
|
||||||
```go
|
|
||||||
val := cfg.Section("").Key("key name").Validate(func(in string) string {
|
|
||||||
if len(in) == 0 {
|
|
||||||
return "default"
|
|
||||||
}
|
|
||||||
return in
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
To get value with types:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// For boolean values:
|
|
||||||
// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
|
|
||||||
// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
|
|
||||||
v, err = cfg.Section("").Key("BOOL").Bool()
|
|
||||||
v, err = cfg.Section("").Key("FLOAT64").Float64()
|
|
||||||
v, err = cfg.Section("").Key("INT").Int()
|
|
||||||
v, err = cfg.Section("").Key("INT64").Int64()
|
|
||||||
v, err = cfg.Section("").Key("UINT").Uint()
|
|
||||||
v, err = cfg.Section("").Key("UINT64").Uint64()
|
|
||||||
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
|
|
||||||
v, err = cfg.Section("").Key("TIME").Time() // RFC3339
|
|
||||||
|
|
||||||
v = cfg.Section("").Key("BOOL").MustBool()
|
|
||||||
v = cfg.Section("").Key("FLOAT64").MustFloat64()
|
|
||||||
v = cfg.Section("").Key("INT").MustInt()
|
|
||||||
v = cfg.Section("").Key("INT64").MustInt64()
|
|
||||||
v = cfg.Section("").Key("UINT").MustUint()
|
|
||||||
v = cfg.Section("").Key("UINT64").MustUint64()
|
|
||||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
|
|
||||||
v = cfg.Section("").Key("TIME").MustTime() // RFC3339
|
|
||||||
|
|
||||||
// Methods start with Must also accept one argument for default value
|
|
||||||
// when key not found or fail to parse value to given type.
|
|
||||||
// Except method MustString, which you have to pass a default value.
|
|
||||||
|
|
||||||
v = cfg.Section("").Key("String").MustString("default")
|
|
||||||
v = cfg.Section("").Key("BOOL").MustBool(true)
|
|
||||||
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
|
|
||||||
v = cfg.Section("").Key("INT").MustInt(10)
|
|
||||||
v = cfg.Section("").Key("INT64").MustInt64(99)
|
|
||||||
v = cfg.Section("").Key("UINT").MustUint(3)
|
|
||||||
v = cfg.Section("").Key("UINT64").MustUint64(6)
|
|
||||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
|
|
||||||
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
|
|
||||||
```
|
|
||||||
|
|
||||||
What if my value is three-line long?
|
|
||||||
|
|
||||||
```ini
|
|
||||||
[advance]
|
|
||||||
ADDRESS = """404 road,
|
|
||||||
NotFound, State, 5000
|
|
||||||
Earth"""
|
|
||||||
```
|
|
||||||
|
|
||||||
Not a problem!
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("advance").Key("ADDRESS").String()
|
|
||||||
|
|
||||||
/* --- start ---
|
|
||||||
404 road,
|
|
||||||
NotFound, State, 5000
|
|
||||||
Earth
|
|
||||||
------ end --- */
|
|
||||||
```
|
|
||||||
|
|
||||||
That's cool, how about continuation lines?
|
|
||||||
|
|
||||||
```ini
|
|
||||||
[advance]
|
|
||||||
two_lines = how about \
|
|
||||||
continuation lines?
|
|
||||||
lots_of_lines = 1 \
|
|
||||||
2 \
|
|
||||||
3 \
|
|
||||||
4
|
|
||||||
```
|
|
||||||
|
|
||||||
Piece of cake!
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
|
|
||||||
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that single quotes around values will be stripped:
|
|
||||||
|
|
||||||
```ini
|
|
||||||
foo = "some value" // foo: some value
|
|
||||||
bar = 'some value' // bar: some value
|
|
||||||
```
|
|
||||||
|
|
||||||
That's all? Hmm, no.
|
|
||||||
|
|
||||||
#### Helper methods of working with values
|
|
||||||
|
|
||||||
To get value with given candidates:
|
|
||||||
|
|
||||||
```go
|
|
||||||
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
|
|
||||||
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
|
|
||||||
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
|
|
||||||
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
|
|
||||||
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
|
|
||||||
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
|
|
||||||
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
|
|
||||||
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
|
|
||||||
```
|
|
||||||
|
|
||||||
The default value will be returned if the value of the key is not among the candidates you have given; the default value does not need to be one of the candidates.
|
|
||||||
|
|
||||||
To validate value in a given range:
|
|
||||||
|
|
||||||
```go
|
|
||||||
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
|
|
||||||
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
|
|
||||||
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
|
|
||||||
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
|
|
||||||
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
|
|
||||||
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
|
|
||||||
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
|
|
||||||
```
|
|
||||||
|
|
||||||
To auto-split value into slice:
|
|
||||||
|
|
||||||
```go
|
|
||||||
vals = cfg.Section("").Key("STRINGS").Strings(",")
|
|
||||||
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
|
|
||||||
vals = cfg.Section("").Key("INTS").Ints(",")
|
|
||||||
vals = cfg.Section("").Key("INT64S").Int64s(",")
|
|
||||||
vals = cfg.Section("").Key("UINTS").Uints(",")
|
|
||||||
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
|
|
||||||
vals = cfg.Section("").Key("TIMES").Times(",")
|
|
||||||
```
|
|
||||||
|
|
||||||
### Save your configuration
|
|
||||||
|
|
||||||
Finally, it's time to save your configuration to somewhere.
|
|
||||||
|
|
||||||
A typical way to save configuration is writing it to a file:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// ...
|
|
||||||
err = cfg.SaveTo("my.ini")
|
|
||||||
err = cfg.SaveToIndent("my.ini", "\t")
|
|
||||||
```
|
|
||||||
|
|
||||||
Another way to save is writing to a `io.Writer` interface:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// ...
|
|
||||||
cfg.WriteTo(writer)
|
|
||||||
cfg.WriteToIndent(writer, "\t")
|
|
||||||
```
|
|
||||||
|
|
||||||
## Advanced Usage
|
|
||||||
|
|
||||||
### Recursive Values
|
|
||||||
|
|
||||||
For all key values, there is a special syntax `%(<name>)s`, where `<name>` is a key name in the same section or the default section, and `%(<name>)s` will be replaced by the corresponding value (an empty string if the key is not found). You can use this syntax with at most 99 levels of recursion.
|
|
||||||
|
|
||||||
```ini
|
|
||||||
NAME = ini
|
|
||||||
|
|
||||||
[author]
|
|
||||||
NAME = Unknwon
|
|
||||||
GITHUB = https://github.com/%(NAME)s
|
|
||||||
|
|
||||||
[package]
|
|
||||||
FULL_NAME = github.com/go-ini/%(NAME)s
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
|
|
||||||
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
|
|
||||||
```
|
|
||||||
|
|
||||||
### Parent-child Sections
|
|
||||||
|
|
||||||
You can use `.` in section name to indicate parent-child relationship between two or more sections. If the key not found in the child section, library will try again on its parent section until there is no parent section.
|
|
||||||
|
|
||||||
```ini
|
|
||||||
NAME = ini
|
|
||||||
VERSION = v1
|
|
||||||
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
|
|
||||||
|
|
||||||
[package]
|
|
||||||
CLONE_URL = https://%(IMPORT_PATH)s
|
|
||||||
|
|
||||||
[package.sub]
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
|
|
||||||
```
|
|
||||||
|
|
||||||
### Auto-increment Key Names
|
|
||||||
|
|
||||||
If a key name is `-` in a data source, then it is treated as special syntax for an auto-increment key name starting from 1, and every section has an independent counter.
|
|
||||||
|
|
||||||
```ini
|
|
||||||
[features]
|
|
||||||
-: Support read/write comments of keys and sections
|
|
||||||
-: Support auto-increment of key names
|
|
||||||
-: Support load multiple files to overwrite key values
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
|
|
||||||
```
|
|
||||||
|
|
||||||
### Map To Struct
|
|
||||||
|
|
||||||
Want more objective way to play with INI? Cool.
|
|
||||||
|
|
||||||
```ini
|
|
||||||
Name = Unknwon
|
|
||||||
age = 21
|
|
||||||
Male = true
|
|
||||||
Born = 1993-01-01T20:17:05Z
|
|
||||||
|
|
||||||
[Note]
|
|
||||||
Content = Hi is a good man!
|
|
||||||
Cities = HangZhou, Boston
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Note struct {
|
|
||||||
Content string
|
|
||||||
Cities []string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Person struct {
|
|
||||||
Name string
|
|
||||||
Age int `ini:"age"`
|
|
||||||
Male bool
|
|
||||||
Born time.Time
|
|
||||||
Note
|
|
||||||
Created time.Time `ini:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
cfg, err := ini.Load("path/to/ini")
|
|
||||||
// ...
|
|
||||||
p := new(Person)
|
|
||||||
err = cfg.MapTo(p)
|
|
||||||
// ...
|
|
||||||
|
|
||||||
// Things can be simpler.
|
|
||||||
err = ini.MapTo(p, "path/to/ini")
|
|
||||||
// ...
|
|
||||||
|
|
||||||
// Just map a section? Fine.
|
|
||||||
n := new(Note)
|
|
||||||
err = cfg.Section("Note").MapTo(n)
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Can I have default value for field? Absolutely.
|
|
||||||
|
|
||||||
Assign it before you map to struct. It will keep the value as it is if the key is not presented or got wrong type.
|
|
||||||
|
|
||||||
```go
|
|
||||||
// ...
|
|
||||||
p := &Person{
|
|
||||||
Name: "Joe",
|
|
||||||
}
|
|
||||||
// ...
|
|
||||||
```
|
|
||||||
|
|
||||||
It's really cool, but what's the point if you can't give me my file back from struct?
|
|
||||||
|
|
||||||
### Reflect From Struct
|
|
||||||
|
|
||||||
Why not?
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Embeded struct {
|
|
||||||
Dates []time.Time `delim:"|"`
|
|
||||||
Places []string
|
|
||||||
None []int
|
|
||||||
}
|
|
||||||
|
|
||||||
type Author struct {
|
|
||||||
Name string `ini:"NAME"`
|
|
||||||
Male bool
|
|
||||||
Age int
|
|
||||||
GPA float64
|
|
||||||
NeverMind string `ini:"-"`
|
|
||||||
*Embeded
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
a := &Author{"Unknwon", true, 21, 2.8, "",
|
|
||||||
&Embeded{
|
|
||||||
[]time.Time{time.Now(), time.Now()},
|
|
||||||
[]string{"HangZhou", "Boston"},
|
|
||||||
[]int{},
|
|
||||||
}}
|
|
||||||
cfg := ini.Empty()
|
|
||||||
err = ini.ReflectFrom(cfg, a)
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
So, what do I get?
|
|
||||||
|
|
||||||
```ini
|
|
||||||
NAME = Unknwon
|
|
||||||
Male = true
|
|
||||||
Age = 21
|
|
||||||
GPA = 2.8
|
|
||||||
|
|
||||||
[Embeded]
|
|
||||||
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
|
|
||||||
Places = HangZhou,Boston
|
|
||||||
None =
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Name Mapper
|
|
||||||
|
|
||||||
To save your time and make your code cleaner, this library supports [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) between struct field and actual section and key name.
|
|
||||||
|
|
||||||
There are 2 built-in name mappers:
|
|
||||||
|
|
||||||
- `AllCapsUnderscore`: it converts to format `ALL_CAPS_UNDERSCORE` then match section or key.
|
|
||||||
- `TitleUnderscore`: it converts to format `title_underscore` then match section or key.
|
|
||||||
|
|
||||||
To use them:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Info struct {
|
|
||||||
PackageName string
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
|
|
||||||
// ...
|
|
||||||
|
|
||||||
cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
|
|
||||||
// ...
|
|
||||||
info := new(Info)
|
|
||||||
cfg.NameMapper = ini.AllCapsUnderscore
|
|
||||||
err = cfg.MapTo(info)
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Same rules of name mapper apply to `ini.ReflectFromWithMapper` function.
|
|
||||||
|
|
||||||
#### Other Notes On Map/Reflect
|
|
||||||
|
|
||||||
Any embedded struct is treated as a section by default, and there is no automatic parent-child relations in map/reflect feature:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Child struct {
|
|
||||||
Age string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Parent struct {
|
|
||||||
Name string
|
|
||||||
Child
|
|
||||||
}
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
City string
|
|
||||||
Parent
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Example configuration:
|
|
||||||
|
|
||||||
```ini
|
|
||||||
City = Boston
|
|
||||||
|
|
||||||
[Parent]
|
|
||||||
Name = Unknwon
|
|
||||||
|
|
||||||
[Child]
|
|
||||||
Age = 21
|
|
||||||
```
|
|
||||||
|
|
||||||
What if, yes, I'm paranoid, I want embedded struct to be in the same section. Well, all roads lead to Rome.
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Child struct {
|
|
||||||
Age string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Parent struct {
|
|
||||||
Name string
|
|
||||||
Child `ini:"Parent"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
City string
|
|
||||||
Parent
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Example configuration:
|
|
||||||
|
|
||||||
```ini
|
|
||||||
City = Boston
|
|
||||||
|
|
||||||
[Parent]
|
|
||||||
Name = Unknwon
|
|
||||||
Age = 21
|
|
||||||
```
|
|
||||||
|
|
||||||
## Getting Help
|
|
||||||
|
|
||||||
- [API Documentation](https://gowalker.org/gopkg.in/ini.v1)
|
|
||||||
- [File An Issue](https://github.com/go-ini/ini/issues/new)
|
|
||||||
|
|
||||||
## FAQs
|
|
||||||
|
|
||||||
### What does `BlockMode` field do?
|
|
||||||
|
|
||||||
By default, library lets you read and write values so we need a locker to make sure your data is safe. But in cases that you are very sure about only reading data through the library, you can set `cfg.BlockMode = false` to speed up read operations about **50-70%** faster.
|
|
||||||
|
|
||||||
### Why another INI library?
|
|
||||||
|
|
||||||
Many people are using another INI library of mine, [goconfig](https://github.com/Unknwon/goconfig), so the reason for this one is that I would like to write more Go-style code. Also, when you set `cfg.BlockMode = false`, this one is about **10-30%** faster.
|
|
||||||
|
|
||||||
Making those changes meant breaking the API, so it's safer to keep the new code in another place and start using `gopkg.in` to version my package at this time. (PS: shorter import path)
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
This project is under Apache v2 License. See the [LICENSE](LICENSE) file for the full license text.
|
|
||||||
547
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md
generated
vendored
547
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/README_ZH.md
generated
vendored
@@ -1,547 +0,0 @@
|
|||||||
本包提供了 Go 语言中读写 INI 文件的功能。
|
|
||||||
|
|
||||||
## 功能特性
|
|
||||||
|
|
||||||
- 支持覆盖加载多个数据源(`[]byte` 或文件)
|
|
||||||
- 支持递归读取键值
|
|
||||||
- 支持读取父子分区
|
|
||||||
- 支持读取自增键名
|
|
||||||
- 支持读取多行的键值
|
|
||||||
- 支持大量辅助方法
|
|
||||||
- 支持在读取时直接转换为 Go 语言类型
|
|
||||||
- 支持读取和 **写入** 分区和键的注释
|
|
||||||
- 轻松操作分区、键值和注释
|
|
||||||
- 在保存文件时分区和键值会保持原有的顺序
|
|
||||||
|
|
||||||
## 下载安装
|
|
||||||
|
|
||||||
go get gopkg.in/ini.v1
|
|
||||||
|
|
||||||
## 开始使用
|
|
||||||
|
|
||||||
### 从数据源加载
|
|
||||||
|
|
||||||
一个 **数据源** 可以是 `[]byte` 类型的原始数据,或 `string` 类型的文件路径。您可以加载 **任意多个** 数据源。如果您传递其它类型的数据源,则会直接返回错误。
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg, err := ini.Load([]byte("raw data"), "filename")
|
|
||||||
```
|
|
||||||
|
|
||||||
或者从一个空白的文件开始:
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg := ini.Empty()
|
|
||||||
```
|
|
||||||
|
|
||||||
当您在一开始无法决定需要加载哪些数据源时,仍可以使用 **Append()** 在需要的时候加载它们。
|
|
||||||
|
|
||||||
```go
|
|
||||||
err := cfg.Append("other file", []byte("other raw data"))
|
|
||||||
```
|
|
||||||
|
|
||||||
### 操作分区(Section)
|
|
||||||
|
|
||||||
获取指定分区:
|
|
||||||
|
|
||||||
```go
|
|
||||||
section, err := cfg.GetSection("section name")
|
|
||||||
```
|
|
||||||
|
|
||||||
如果您想要获取默认分区,则可以用空字符串代替分区名:
|
|
||||||
|
|
||||||
```go
|
|
||||||
section, err := cfg.GetSection("")
|
|
||||||
```
|
|
||||||
|
|
||||||
当您非常确定某个分区是存在的,可以使用以下简便方法:
|
|
||||||
|
|
||||||
```go
|
|
||||||
section := cfg.Section("")
|
|
||||||
```
|
|
||||||
|
|
||||||
如果不小心判断错了,要获取的分区其实是不存在的,那会发生什么呢?没事的,它会自动创建并返回一个对应的分区对象给您。
|
|
||||||
|
|
||||||
创建一个分区:
|
|
||||||
|
|
||||||
```go
|
|
||||||
err := cfg.NewSection("new section")
|
|
||||||
```
|
|
||||||
|
|
||||||
获取所有分区对象或名称:
|
|
||||||
|
|
||||||
```go
|
|
||||||
sections := cfg.Sections()
|
|
||||||
names := cfg.SectionStrings()
|
|
||||||
```
|
|
||||||
|
|
||||||
### 操作键(Key)
|
|
||||||
|
|
||||||
获取某个分区下的键:
|
|
||||||
|
|
||||||
```go
|
|
||||||
key, err := cfg.Section("").GetKey("key name")
|
|
||||||
```
|
|
||||||
|
|
||||||
和分区一样,您也可以直接获取键而忽略错误处理:
|
|
||||||
|
|
||||||
```go
|
|
||||||
key := cfg.Section("").Key("key name")
|
|
||||||
```
|
|
||||||
|
|
||||||
创建一个新的键:
|
|
||||||
|
|
||||||
```go
|
|
||||||
err := cfg.Section("").NewKey("name", "value")
|
|
||||||
```
|
|
||||||
|
|
||||||
获取分区下的所有键或键名:
|
|
||||||
|
|
||||||
```go
|
|
||||||
keys := cfg.Section("").Keys()
|
|
||||||
names := cfg.Section("").KeyStrings()
|
|
||||||
```
|
|
||||||
|
|
||||||
获取分区下的所有键值对的克隆:
|
|
||||||
|
|
||||||
```go
|
|
||||||
hash := cfg.GetSection("").KeysHash()
|
|
||||||
```
|
|
||||||
|
|
||||||
### 操作键值(Value)
|
|
||||||
|
|
||||||
获取一个类型为字符串(string)的值:
|
|
||||||
|
|
||||||
```go
|
|
||||||
val := cfg.Section("").Key("key name").String()
|
|
||||||
```
|
|
||||||
|
|
||||||
获取值的同时通过自定义函数进行处理验证:
|
|
||||||
|
|
||||||
```go
|
|
||||||
val := cfg.Section("").Key("key name").Validate(func(in string) string {
|
|
||||||
if len(in) == 0 {
|
|
||||||
return "default"
|
|
||||||
}
|
|
||||||
return in
|
|
||||||
})
|
|
||||||
```
|
|
||||||
|
|
||||||
获取其它类型的值:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// 布尔值的规则:
|
|
||||||
// true 当值为:1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
|
|
||||||
// false 当值为:0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
|
|
||||||
v, err = cfg.Section("").Key("BOOL").Bool()
|
|
||||||
v, err = cfg.Section("").Key("FLOAT64").Float64()
|
|
||||||
v, err = cfg.Section("").Key("INT").Int()
|
|
||||||
v, err = cfg.Section("").Key("INT64").Int64()
|
|
||||||
v, err = cfg.Section("").Key("UINT").Uint()
|
|
||||||
v, err = cfg.Section("").Key("UINT64").Uint64()
|
|
||||||
v, err = cfg.Section("").Key("TIME").TimeFormat(time.RFC3339)
|
|
||||||
v, err = cfg.Section("").Key("TIME").Time() // RFC3339
|
|
||||||
|
|
||||||
v = cfg.Section("").Key("BOOL").MustBool()
|
|
||||||
v = cfg.Section("").Key("FLOAT64").MustFloat64()
|
|
||||||
v = cfg.Section("").Key("INT").MustInt()
|
|
||||||
v = cfg.Section("").Key("INT64").MustInt64()
|
|
||||||
v = cfg.Section("").Key("UINT").MustUint()
|
|
||||||
v = cfg.Section("").Key("UINT64").MustUint64()
|
|
||||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339)
|
|
||||||
v = cfg.Section("").Key("TIME").MustTime() // RFC3339
|
|
||||||
|
|
||||||
// 由 Must 开头的方法名允许接收一个相同类型的参数来作为默认值,
|
|
||||||
// 当键不存在或者转换失败时,则会直接返回该默认值。
|
|
||||||
// 但是,MustString 方法必须传递一个默认值。
|
|
||||||
|
|
||||||
v = cfg.Section("").Key("String").MustString("default")
|
|
||||||
v = cfg.Section("").Key("BOOL").MustBool(true)
|
|
||||||
v = cfg.Section("").Key("FLOAT64").MustFloat64(1.25)
|
|
||||||
v = cfg.Section("").Key("INT").MustInt(10)
|
|
||||||
v = cfg.Section("").Key("INT64").MustInt64(99)
|
|
||||||
v = cfg.Section("").Key("UINT").MustUint(3)
|
|
||||||
v = cfg.Section("").Key("UINT64").MustUint64(6)
|
|
||||||
v = cfg.Section("").Key("TIME").MustTimeFormat(time.RFC3339, time.Now())
|
|
||||||
v = cfg.Section("").Key("TIME").MustTime(time.Now()) // RFC3339
|
|
||||||
```
|
|
||||||
|
|
||||||
如果我的值有好多行怎么办?
|
|
||||||
|
|
||||||
```ini
|
|
||||||
[advance]
|
|
||||||
ADDRESS = """404 road,
|
|
||||||
NotFound, State, 5000
|
|
||||||
Earth"""
|
|
||||||
```
|
|
||||||
|
|
||||||
嗯哼?小 case!
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("advance").Key("ADDRESS").String()
|
|
||||||
|
|
||||||
/* --- start ---
|
|
||||||
404 road,
|
|
||||||
NotFound, State, 5000
|
|
||||||
Earth
|
|
||||||
------ end --- */
|
|
||||||
```
|
|
||||||
|
|
||||||
赞爆了!那要是我属于一行的内容写不下想要写到第二行怎么办?
|
|
||||||
|
|
||||||
```ini
|
|
||||||
[advance]
|
|
||||||
two_lines = how about \
|
|
||||||
continuation lines?
|
|
||||||
lots_of_lines = 1 \
|
|
||||||
2 \
|
|
||||||
3 \
|
|
||||||
4
|
|
||||||
```
|
|
||||||
|
|
||||||
简直是小菜一碟!
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("advance").Key("two_lines").String() // how about continuation lines?
|
|
||||||
cfg.Section("advance").Key("lots_of_lines").String() // 1 2 3 4
|
|
||||||
```
|
|
||||||
|
|
||||||
需要注意的是,值两侧的单引号会被自动剔除:
|
|
||||||
|
|
||||||
```ini
|
|
||||||
foo = "some value" // foo: some value
|
|
||||||
bar = 'some value' // bar: some value
|
|
||||||
```
|
|
||||||
|
|
||||||
这就是全部了?哈哈,当然不是。
|
|
||||||
|
|
||||||
#### 操作键值的辅助方法
|
|
||||||
|
|
||||||
获取键值时设定候选值:
|
|
||||||
|
|
||||||
```go
|
|
||||||
v = cfg.Section("").Key("STRING").In("default", []string{"str", "arr", "types"})
|
|
||||||
v = cfg.Section("").Key("FLOAT64").InFloat64(1.1, []float64{1.25, 2.5, 3.75})
|
|
||||||
v = cfg.Section("").Key("INT").InInt(5, []int{10, 20, 30})
|
|
||||||
v = cfg.Section("").Key("INT64").InInt64(10, []int64{10, 20, 30})
|
|
||||||
v = cfg.Section("").Key("UINT").InUint(4, []int{3, 6, 9})
|
|
||||||
v = cfg.Section("").Key("UINT64").InUint64(8, []int64{3, 6, 9})
|
|
||||||
v = cfg.Section("").Key("TIME").InTimeFormat(time.RFC3339, time.Now(), []time.Time{time1, time2, time3})
|
|
||||||
v = cfg.Section("").Key("TIME").InTime(time.Now(), []time.Time{time1, time2, time3}) // RFC3339
|
|
||||||
```
|
|
||||||
|
|
||||||
如果获取到的值不是候选值的任意一个,则会返回默认值,而默认值不需要是候选值中的一员。
|
|
||||||
|
|
||||||
验证获取的值是否在指定范围内:
|
|
||||||
|
|
||||||
```go
|
|
||||||
vals = cfg.Section("").Key("FLOAT64").RangeFloat64(0.0, 1.1, 2.2)
|
|
||||||
vals = cfg.Section("").Key("INT").RangeInt(0, 10, 20)
|
|
||||||
vals = cfg.Section("").Key("INT64").RangeInt64(0, 10, 20)
|
|
||||||
vals = cfg.Section("").Key("UINT").RangeUint(0, 3, 9)
|
|
||||||
vals = cfg.Section("").Key("UINT64").RangeUint64(0, 3, 9)
|
|
||||||
vals = cfg.Section("").Key("TIME").RangeTimeFormat(time.RFC3339, time.Now(), minTime, maxTime)
|
|
||||||
vals = cfg.Section("").Key("TIME").RangeTime(time.Now(), minTime, maxTime) // RFC3339
|
|
||||||
```
|
|
||||||
|
|
||||||
自动分割键值为切片(slice):
|
|
||||||
|
|
||||||
```go
|
|
||||||
vals = cfg.Section("").Key("STRINGS").Strings(",")
|
|
||||||
vals = cfg.Section("").Key("FLOAT64S").Float64s(",")
|
|
||||||
vals = cfg.Section("").Key("INTS").Ints(",")
|
|
||||||
vals = cfg.Section("").Key("INT64S").Int64s(",")
|
|
||||||
vals = cfg.Section("").Key("UINTS").Uints(",")
|
|
||||||
vals = cfg.Section("").Key("UINT64S").Uint64s(",")
|
|
||||||
vals = cfg.Section("").Key("TIMES").Times(",")
|
|
||||||
```
|
|
||||||
|
|
||||||
### 保存配置
|
|
||||||
|
|
||||||
终于到了这个时刻,是时候保存一下配置了。
|
|
||||||
|
|
||||||
比较原始的做法是输出配置到某个文件:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// ...
|
|
||||||
err = cfg.SaveTo("my.ini")
|
|
||||||
err = cfg.SaveToIndent("my.ini", "\t")
|
|
||||||
```
|
|
||||||
|
|
||||||
另一个比较高级的做法是写入到任何实现 `io.Writer` 接口的对象中:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// ...
|
|
||||||
cfg.WriteTo(writer)
|
|
||||||
cfg.WriteToIndent(writer, "\t")
|
|
||||||
```
|
|
||||||
|
|
||||||
### 高级用法
|
|
||||||
|
|
||||||
#### 递归读取键值
|
|
||||||
|
|
||||||
在获取所有键值的过程中,特殊语法 `%(<name>)s` 会被应用,其中 `<name>` 可以是相同分区或者默认分区下的键名。字符串 `%(<name>)s` 会被相应的键值所替代,如果指定的键不存在,则会用空字符串替代。您可以最多使用 99 层的递归嵌套。
|
|
||||||
|
|
||||||
```ini
|
|
||||||
NAME = ini
|
|
||||||
|
|
||||||
[author]
|
|
||||||
NAME = Unknwon
|
|
||||||
GITHUB = https://github.com/%(NAME)s
|
|
||||||
|
|
||||||
[package]
|
|
||||||
FULL_NAME = github.com/go-ini/%(NAME)s
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("author").Key("GITHUB").String() // https://github.com/Unknwon
|
|
||||||
cfg.Section("package").Key("FULL_NAME").String() // github.com/go-ini/ini
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 读取父子分区
|
|
||||||
|
|
||||||
您可以在分区名称中使用 `.` 来表示两个或多个分区之间的父子关系。如果某个键在子分区中不存在,则会去它的父分区中再次寻找,直到没有父分区为止。
|
|
||||||
|
|
||||||
```ini
|
|
||||||
NAME = ini
|
|
||||||
VERSION = v1
|
|
||||||
IMPORT_PATH = gopkg.in/%(NAME)s.%(VERSION)s
|
|
||||||
|
|
||||||
[package]
|
|
||||||
CLONE_URL = https://%(IMPORT_PATH)s
|
|
||||||
|
|
||||||
[package.sub]
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("package.sub").Key("CLONE_URL").String() // https://gopkg.in/ini.v1
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 读取自增键名
|
|
||||||
|
|
||||||
如果数据源中的键名为 `-`,则认为该键使用了自增键名的特殊语法。计数器从 1 开始,并且分区之间是相互独立的。
|
|
||||||
|
|
||||||
```ini
|
|
||||||
[features]
|
|
||||||
-: Support read/write comments of keys and sections
|
|
||||||
-: Support auto-increment of key names
|
|
||||||
-: Support load multiple files to overwrite key values
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
cfg.Section("features").KeyStrings() // []{"#1", "#2", "#3"}
|
|
||||||
```
|
|
||||||
|
|
||||||
### 映射到结构
|
|
||||||
|
|
||||||
想要使用更加面向对象的方式玩转 INI 吗?好主意。
|
|
||||||
|
|
||||||
```ini
|
|
||||||
Name = Unknwon
|
|
||||||
age = 21
|
|
||||||
Male = true
|
|
||||||
Born = 1993-01-01T20:17:05Z
|
|
||||||
|
|
||||||
[Note]
|
|
||||||
Content = Hi is a good man!
|
|
||||||
Cities = HangZhou, Boston
|
|
||||||
```
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Note struct {
|
|
||||||
Content string
|
|
||||||
Cities []string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Person struct {
|
|
||||||
Name string
|
|
||||||
Age int `ini:"age"`
|
|
||||||
Male bool
|
|
||||||
Born time.Time
|
|
||||||
Note
|
|
||||||
Created time.Time `ini:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
cfg, err := ini.Load("path/to/ini")
|
|
||||||
// ...
|
|
||||||
p := new(Person)
|
|
||||||
err = cfg.MapTo(p)
|
|
||||||
// ...
|
|
||||||
|
|
||||||
// 一切竟可以如此的简单。
|
|
||||||
err = ini.MapTo(p, "path/to/ini")
|
|
||||||
// ...
|
|
||||||
|
|
||||||
// 嗯哼?只需要映射一个分区吗?
|
|
||||||
n := new(Note)
|
|
||||||
err = cfg.Section("Note").MapTo(n)
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
结构的字段怎么设置默认值呢?很简单,只要在映射之前对指定字段进行赋值就可以了。如果键未找到或者类型错误,该值不会发生改变。
|
|
||||||
|
|
||||||
```go
|
|
||||||
// ...
|
|
||||||
p := &Person{
|
|
||||||
Name: "Joe",
|
|
||||||
}
|
|
||||||
// ...
|
|
||||||
```
|
|
||||||
|
|
||||||
这样玩 INI 真的好酷啊!然而,如果不能还给我原来的配置文件,有什么卵用?
|
|
||||||
|
|
||||||
### 从结构反射
|
|
||||||
|
|
||||||
可是,我有说不能吗?
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Embeded struct {
|
|
||||||
Dates []time.Time `delim:"|"`
|
|
||||||
Places []string
|
|
||||||
None []int
|
|
||||||
}
|
|
||||||
|
|
||||||
type Author struct {
|
|
||||||
Name string `ini:"NAME"`
|
|
||||||
Male bool
|
|
||||||
Age int
|
|
||||||
GPA float64
|
|
||||||
NeverMind string `ini:"-"`
|
|
||||||
*Embeded
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
a := &Author{"Unknwon", true, 21, 2.8, "",
|
|
||||||
&Embeded{
|
|
||||||
[]time.Time{time.Now(), time.Now()},
|
|
||||||
[]string{"HangZhou", "Boston"},
|
|
||||||
[]int{},
|
|
||||||
}}
|
|
||||||
cfg := ini.Empty()
|
|
||||||
err = ini.ReflectFrom(cfg, a)
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
瞧瞧,奇迹发生了。
|
|
||||||
|
|
||||||
```ini
|
|
||||||
NAME = Unknwon
|
|
||||||
Male = true
|
|
||||||
Age = 21
|
|
||||||
GPA = 2.8
|
|
||||||
|
|
||||||
[Embeded]
|
|
||||||
Dates = 2015-08-07T22:14:22+08:00|2015-08-07T22:14:22+08:00
|
|
||||||
Places = HangZhou,Boston
|
|
||||||
None =
|
|
||||||
```
|
|
||||||
|
|
||||||
#### 名称映射器(Name Mapper)
|
|
||||||
|
|
||||||
为了节省您的时间并简化代码,本库支持类型为 [`NameMapper`](https://gowalker.org/gopkg.in/ini.v1#NameMapper) 的名称映射器,该映射器负责结构字段名与分区名和键名之间的映射。
|
|
||||||
|
|
||||||
目前有 2 款内置的映射器:
|
|
||||||
|
|
||||||
- `AllCapsUnderscore`:该映射器将字段名转换至格式 `ALL_CAPS_UNDERSCORE` 后再去匹配分区名和键名。
|
|
||||||
- `TitleUnderscore`:该映射器将字段名转换至格式 `title_underscore` 后再去匹配分区名和键名。
|
|
||||||
|
|
||||||
使用方法:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Info struct{
|
|
||||||
PackageName string
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
err = ini.MapToWithMapper(&Info{}, ini.TitleUnderscore, []byte("packag_name=ini"))
|
|
||||||
// ...
|
|
||||||
|
|
||||||
cfg, err := ini.Load([]byte("PACKAGE_NAME=ini"))
|
|
||||||
// ...
|
|
||||||
info := new(Info)
|
|
||||||
cfg.NameMapper = ini.AllCapsUnderscore
|
|
||||||
err = cfg.MapTo(info)
|
|
||||||
// ...
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
使用函数 `ini.ReflectFromWithMapper` 时也可应用相同的规则。
|
|
||||||
|
|
||||||
#### 映射/反射的其它说明
|
|
||||||
|
|
||||||
任何嵌入的结构都会被默认认作一个不同的分区,并且不会自动产生所谓的父子分区关联:
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Child struct {
|
|
||||||
Age string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Parent struct {
|
|
||||||
Name string
|
|
||||||
Child
|
|
||||||
}
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
City string
|
|
||||||
Parent
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
示例配置文件:
|
|
||||||
|
|
||||||
```ini
|
|
||||||
City = Boston
|
|
||||||
|
|
||||||
[Parent]
|
|
||||||
Name = Unknwon
|
|
||||||
|
|
||||||
[Child]
|
|
||||||
Age = 21
|
|
||||||
```
|
|
||||||
|
|
||||||
很好,但是,我就是要嵌入结构也在同一个分区。好吧,你爹是李刚!
|
|
||||||
|
|
||||||
```go
|
|
||||||
type Child struct {
|
|
||||||
Age string
|
|
||||||
}
|
|
||||||
|
|
||||||
type Parent struct {
|
|
||||||
Name string
|
|
||||||
Child `ini:"Parent"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type Config struct {
|
|
||||||
City string
|
|
||||||
Parent
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
示例配置文件:
|
|
||||||
|
|
||||||
```ini
|
|
||||||
City = Boston
|
|
||||||
|
|
||||||
[Parent]
|
|
||||||
Name = Unknwon
|
|
||||||
Age = 21
|
|
||||||
```
|
|
||||||
|
|
||||||
## 获取帮助
|
|
||||||
|
|
||||||
- [API 文档](https://gowalker.org/gopkg.in/ini.v1)
|
|
||||||
- [创建工单](https://github.com/go-ini/ini/issues/new)
|
|
||||||
|
|
||||||
## 常见问题
|
|
||||||
|
|
||||||
### 字段 `BlockMode` 是什么?
|
|
||||||
|
|
||||||
默认情况下,本库会在您进行读写操作时采用锁机制来确保数据时间。但在某些情况下,您非常确定只进行读操作。此时,您可以通过设置 `cfg.BlockMode = false` 来将读操作提升大约 **50-70%** 的性能。
|
|
||||||
|
|
||||||
### 为什么要写另一个 INI 解析库?
|
|
||||||
|
|
||||||
许多人都在使用我的 [goconfig](https://github.com/Unknwon/goconfig) 来完成对 INI 文件的操作,但我希望使用更加 Go 风格的代码。并且当您设置 `cfg.BlockMode = false` 时,会有大约 **10-30%** 的性能提升。
|
|
||||||
|
|
||||||
为了做出这些改变,我必须对 API 进行破坏,所以新开一个仓库是最安全的做法。除此之外,本库直接使用 `gopkg.in` 来进行版本化发布。(其实真相是导入路径更短了)
|
|
||||||
1226
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go
generated
vendored
1226
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/ini.go
generated
vendored
File diff suppressed because it is too large
Load Diff
350
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go
generated
vendored
350
vendor/github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini/struct.go
generated
vendored
@@ -1,350 +0,0 @@
|
|||||||
// Copyright 2014 Unknwon
|
|
||||||
//
|
|
||||||
// Licensed under the Apache License, Version 2.0 (the "License"): you may
|
|
||||||
// not use this file except in compliance with the License. You may obtain
|
|
||||||
// a copy of the License at
|
|
||||||
//
|
|
||||||
// http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
//
|
|
||||||
// Unless required by applicable law or agreed to in writing, software
|
|
||||||
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
// License for the specific language governing permissions and limitations
|
|
||||||
// under the License.
|
|
||||||
|
|
||||||
package ini
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"reflect"
|
|
||||||
"time"
|
|
||||||
"unicode"
|
|
||||||
)
|
|
||||||
|
|
||||||
// NameMapper represents a ini tag name mapper.
|
|
||||||
type NameMapper func(string) string
|
|
||||||
|
|
||||||
// Built-in name getters.
|
|
||||||
var (
|
|
||||||
// AllCapsUnderscore converts to format ALL_CAPS_UNDERSCORE.
|
|
||||||
AllCapsUnderscore NameMapper = func(raw string) string {
|
|
||||||
newstr := make([]rune, 0, len(raw))
|
|
||||||
for i, chr := range raw {
|
|
||||||
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
|
||||||
if i > 0 {
|
|
||||||
newstr = append(newstr, '_')
|
|
||||||
}
|
|
||||||
}
|
|
||||||
newstr = append(newstr, unicode.ToUpper(chr))
|
|
||||||
}
|
|
||||||
return string(newstr)
|
|
||||||
}
|
|
||||||
// TitleUnderscore converts to format title_underscore.
|
|
||||||
TitleUnderscore NameMapper = func(raw string) string {
|
|
||||||
newstr := make([]rune, 0, len(raw))
|
|
||||||
for i, chr := range raw {
|
|
||||||
if isUpper := 'A' <= chr && chr <= 'Z'; isUpper {
|
|
||||||
if i > 0 {
|
|
||||||
newstr = append(newstr, '_')
|
|
||||||
}
|
|
||||||
chr -= ('A' - 'a')
|
|
||||||
}
|
|
||||||
newstr = append(newstr, chr)
|
|
||||||
}
|
|
||||||
return string(newstr)
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func (s *Section) parseFieldName(raw, actual string) string {
|
|
||||||
if len(actual) > 0 {
|
|
||||||
return actual
|
|
||||||
}
|
|
||||||
if s.f.NameMapper != nil {
|
|
||||||
return s.f.NameMapper(raw)
|
|
||||||
}
|
|
||||||
return raw
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseDelim(actual string) string {
|
|
||||||
if len(actual) > 0 {
|
|
||||||
return actual
|
|
||||||
}
|
|
||||||
return ","
|
|
||||||
}
|
|
||||||
|
|
||||||
var reflectTime = reflect.TypeOf(time.Now()).Kind()
|
|
||||||
|
|
||||||
// setWithProperType sets proper value to field based on its type,
|
|
||||||
// but it does not return error for failing parsing,
|
|
||||||
// because we want to use default value that is already assigned to strcut.
|
|
||||||
func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
if len(key.String()) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
field.SetString(key.String())
|
|
||||||
case reflect.Bool:
|
|
||||||
boolVal, err := key.Bool()
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
field.SetBool(boolVal)
|
|
||||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
|
||||||
durationVal, err := key.Duration()
|
|
||||||
if err == nil {
|
|
||||||
field.Set(reflect.ValueOf(durationVal))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
intVal, err := key.Int64()
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
field.SetInt(intVal)
|
|
||||||
// byte is an alias for uint8, so supporting uint8 breaks support for byte
|
|
||||||
case reflect.Uint, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
|
||||||
durationVal, err := key.Duration()
|
|
||||||
if err == nil {
|
|
||||||
field.Set(reflect.ValueOf(durationVal))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
uintVal, err := key.Uint64()
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
field.SetUint(uintVal)
|
|
||||||
|
|
||||||
case reflect.Float64:
|
|
||||||
floatVal, err := key.Float64()
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
field.SetFloat(floatVal)
|
|
||||||
case reflectTime:
|
|
||||||
timeVal, err := key.Time()
|
|
||||||
if err != nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
field.Set(reflect.ValueOf(timeVal))
|
|
||||||
case reflect.Slice:
|
|
||||||
vals := key.Strings(delim)
|
|
||||||
numVals := len(vals)
|
|
||||||
if numVals == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
sliceOf := field.Type().Elem().Kind()
|
|
||||||
|
|
||||||
var times []time.Time
|
|
||||||
if sliceOf == reflectTime {
|
|
||||||
times = key.Times(delim)
|
|
||||||
}
|
|
||||||
|
|
||||||
slice := reflect.MakeSlice(field.Type(), numVals, numVals)
|
|
||||||
for i := 0; i < numVals; i++ {
|
|
||||||
switch sliceOf {
|
|
||||||
case reflectTime:
|
|
||||||
slice.Index(i).Set(reflect.ValueOf(times[i]))
|
|
||||||
default:
|
|
||||||
slice.Index(i).Set(reflect.ValueOf(vals[i]))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
field.Set(slice)
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unsupported type '%s'", t)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Section) mapTo(val reflect.Value) error {
|
|
||||||
if val.Kind() == reflect.Ptr {
|
|
||||||
val = val.Elem()
|
|
||||||
}
|
|
||||||
typ := val.Type()
|
|
||||||
|
|
||||||
for i := 0; i < typ.NumField(); i++ {
|
|
||||||
field := val.Field(i)
|
|
||||||
tpField := typ.Field(i)
|
|
||||||
|
|
||||||
tag := tpField.Tag.Get("ini")
|
|
||||||
if tag == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldName := s.parseFieldName(tpField.Name, tag)
|
|
||||||
if len(fieldName) == 0 || !field.CanSet() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
isAnonymous := tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous
|
|
||||||
isStruct := tpField.Type.Kind() == reflect.Struct
|
|
||||||
if isAnonymous {
|
|
||||||
field.Set(reflect.New(tpField.Type.Elem()))
|
|
||||||
}
|
|
||||||
|
|
||||||
if isAnonymous || isStruct {
|
|
||||||
if sec, err := s.f.GetSection(fieldName); err == nil {
|
|
||||||
if err = sec.mapTo(field); err != nil {
|
|
||||||
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if key, err := s.GetKey(fieldName); err == nil {
|
|
||||||
if err = setWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
|
|
||||||
return fmt.Errorf("error mapping field(%s): %v", fieldName, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapTo maps section to given struct.
|
|
||||||
func (s *Section) MapTo(v interface{}) error {
|
|
||||||
typ := reflect.TypeOf(v)
|
|
||||||
val := reflect.ValueOf(v)
|
|
||||||
if typ.Kind() == reflect.Ptr {
|
|
||||||
typ = typ.Elem()
|
|
||||||
val = val.Elem()
|
|
||||||
} else {
|
|
||||||
return errors.New("cannot map to non-pointer struct")
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.mapTo(val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapTo maps file to given struct.
|
|
||||||
func (f *File) MapTo(v interface{}) error {
|
|
||||||
return f.Section("").MapTo(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapTo maps data sources to given struct with name mapper.
|
|
||||||
func MapToWithMapper(v interface{}, mapper NameMapper, source interface{}, others ...interface{}) error {
|
|
||||||
cfg, err := Load(source, others...)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
cfg.NameMapper = mapper
|
|
||||||
return cfg.MapTo(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MapTo maps data sources to given struct.
|
|
||||||
func MapTo(v, source interface{}, others ...interface{}) error {
|
|
||||||
return MapToWithMapper(v, nil, source, others...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// reflectWithProperType does the opposite thing with setWithProperType.
|
|
||||||
func reflectWithProperType(t reflect.Type, key *Key, field reflect.Value, delim string) error {
|
|
||||||
switch t.Kind() {
|
|
||||||
case reflect.String:
|
|
||||||
key.SetValue(field.String())
|
|
||||||
case reflect.Bool,
|
|
||||||
reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
|
||||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
|
|
||||||
reflect.Float64,
|
|
||||||
reflectTime:
|
|
||||||
key.SetValue(fmt.Sprint(field))
|
|
||||||
case reflect.Slice:
|
|
||||||
vals := field.Slice(0, field.Len())
|
|
||||||
if field.Len() == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var buf bytes.Buffer
|
|
||||||
isTime := fmt.Sprint(field.Type()) == "[]time.Time"
|
|
||||||
for i := 0; i < field.Len(); i++ {
|
|
||||||
if isTime {
|
|
||||||
buf.WriteString(vals.Index(i).Interface().(time.Time).Format(time.RFC3339))
|
|
||||||
} else {
|
|
||||||
buf.WriteString(fmt.Sprint(vals.Index(i)))
|
|
||||||
}
|
|
||||||
buf.WriteString(delim)
|
|
||||||
}
|
|
||||||
key.SetValue(buf.String()[:buf.Len()-1])
|
|
||||||
default:
|
|
||||||
return fmt.Errorf("unsupported type '%s'", t)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *Section) reflectFrom(val reflect.Value) error {
|
|
||||||
if val.Kind() == reflect.Ptr {
|
|
||||||
val = val.Elem()
|
|
||||||
}
|
|
||||||
typ := val.Type()
|
|
||||||
|
|
||||||
for i := 0; i < typ.NumField(); i++ {
|
|
||||||
field := val.Field(i)
|
|
||||||
tpField := typ.Field(i)
|
|
||||||
|
|
||||||
tag := tpField.Tag.Get("ini")
|
|
||||||
if tag == "-" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
fieldName := s.parseFieldName(tpField.Name, tag)
|
|
||||||
if len(fieldName) == 0 || !field.CanSet() {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if (tpField.Type.Kind() == reflect.Ptr && tpField.Anonymous) ||
|
|
||||||
(tpField.Type.Kind() == reflect.Struct) {
|
|
||||||
// Note: The only error here is section doesn't exist.
|
|
||||||
sec, err := s.f.GetSection(fieldName)
|
|
||||||
if err != nil {
|
|
||||||
// Note: fieldName can never be empty here, ignore error.
|
|
||||||
sec, _ = s.f.NewSection(fieldName)
|
|
||||||
}
|
|
||||||
if err = sec.reflectFrom(field); err != nil {
|
|
||||||
return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
|
|
||||||
}
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note: Same reason as secion.
|
|
||||||
key, err := s.GetKey(fieldName)
|
|
||||||
if err != nil {
|
|
||||||
key, _ = s.NewKey(fieldName, "")
|
|
||||||
}
|
|
||||||
if err = reflectWithProperType(tpField.Type, key, field, parseDelim(tpField.Tag.Get("delim"))); err != nil {
|
|
||||||
return fmt.Errorf("error reflecting field(%s): %v", fieldName, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReflectFrom reflects secion from given struct.
|
|
||||||
func (s *Section) ReflectFrom(v interface{}) error {
|
|
||||||
typ := reflect.TypeOf(v)
|
|
||||||
val := reflect.ValueOf(v)
|
|
||||||
if typ.Kind() == reflect.Ptr {
|
|
||||||
typ = typ.Elem()
|
|
||||||
val = val.Elem()
|
|
||||||
} else {
|
|
||||||
return errors.New("cannot reflect from non-pointer struct")
|
|
||||||
}
|
|
||||||
|
|
||||||
return s.reflectFrom(val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReflectFrom reflects file from given struct.
|
|
||||||
func (f *File) ReflectFrom(v interface{}) error {
|
|
||||||
return f.Section("").ReflectFrom(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReflectFrom reflects data sources from given struct with name mapper.
|
|
||||||
func ReflectFromWithMapper(cfg *File, v interface{}, mapper NameMapper) error {
|
|
||||||
cfg.NameMapper = mapper
|
|
||||||
return cfg.ReflectFrom(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReflectFrom reflects data sources from given struct.
|
|
||||||
func ReflectFrom(cfg *File, v interface{}) error {
|
|
||||||
return ReflectFromWithMapper(cfg, v, nil)
|
|
||||||
}
|
|
||||||
4
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore
generated
vendored
4
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.gitignore
generated
vendored
@@ -1,4 +0,0 @@
|
|||||||
jpgo
|
|
||||||
jmespath-fuzz.zip
|
|
||||||
cpu.out
|
|
||||||
go-jmespath.test
|
|
||||||
9
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml
generated
vendored
9
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/.travis.yml
generated
vendored
@@ -1,9 +0,0 @@
|
|||||||
language: go
|
|
||||||
|
|
||||||
sudo: false
|
|
||||||
|
|
||||||
go:
|
|
||||||
- 1.4
|
|
||||||
|
|
||||||
install: go get -v -t ./...
|
|
||||||
script: make test
|
|
||||||
13
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE
generated
vendored
13
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/LICENSE
generated
vendored
@@ -1,13 +0,0 @@
|
|||||||
Copyright 2015 James Saryerwinnie
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
||||||
44
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile
generated
vendored
44
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/Makefile
generated
vendored
@@ -1,44 +0,0 @@
|
|||||||
|
|
||||||
CMD = jpgo
|
|
||||||
|
|
||||||
help:
|
|
||||||
@echo "Please use \`make <target>' where <target> is one of"
|
|
||||||
@echo " test to run all the tests"
|
|
||||||
@echo " build to build the library and jp executable"
|
|
||||||
@echo " generate to run codegen"
|
|
||||||
|
|
||||||
|
|
||||||
generate:
|
|
||||||
go generate ./...
|
|
||||||
|
|
||||||
build:
|
|
||||||
rm -f $(CMD)
|
|
||||||
go build ./...
|
|
||||||
rm -f cmd/$(CMD)/$(CMD) && cd cmd/$(CMD)/ && go build ./...
|
|
||||||
mv cmd/$(CMD)/$(CMD) .
|
|
||||||
|
|
||||||
test:
|
|
||||||
go test -v ./...
|
|
||||||
|
|
||||||
check:
|
|
||||||
go vet ./...
|
|
||||||
@echo "golint ./..."
|
|
||||||
@lint=`golint ./...`; \
|
|
||||||
lint=`echo "$$lint" | grep -v "astnodetype_string.go" | grep -v "toktype_string.go"`; \
|
|
||||||
echo "$$lint"; \
|
|
||||||
if [ "$$lint" != "" ]; then exit 1; fi
|
|
||||||
|
|
||||||
htmlc:
|
|
||||||
go test -coverprofile="/tmp/jpcov" && go tool cover -html="/tmp/jpcov" && unlink /tmp/jpcov
|
|
||||||
|
|
||||||
buildfuzz:
|
|
||||||
go-fuzz-build github.com/jmespath/go-jmespath/fuzz
|
|
||||||
|
|
||||||
fuzz: buildfuzz
|
|
||||||
go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus
|
|
||||||
|
|
||||||
bench:
|
|
||||||
go test -bench . -cpuprofile cpu.out
|
|
||||||
|
|
||||||
pprof-cpu:
|
|
||||||
go tool pprof ./go-jmespath.test ./cpu.out
|
|
||||||
7
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md
generated
vendored
7
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/README.md
generated
vendored
@@ -1,7 +0,0 @@
|
|||||||
# go-jmespath - A JMESPath implementation in Go
|
|
||||||
|
|
||||||
[](https://travis-ci.org/jmespath/go-jmespath)
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
See http://jmespath.org for more info.
|
|
||||||
12
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go
generated
vendored
12
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/api.go
generated
vendored
@@ -1,12 +0,0 @@
|
|||||||
package jmespath
|
|
||||||
|
|
||||||
// Search evaluates a JMESPath expression against input data and returns the result.
|
|
||||||
func Search(expression string, data interface{}) (interface{}, error) {
|
|
||||||
intr := newInterpreter()
|
|
||||||
parser := NewParser()
|
|
||||||
ast, err := parser.Parse(expression)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return intr.Execute(ast, data)
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
// generated by stringer -type astNodeType; DO NOT EDIT
|
|
||||||
|
|
||||||
package jmespath
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
const _astNodeType_name = "ASTEmptyASTComparatorASTCurrentNodeASTExpRefASTFunctionExpressionASTFieldASTFilterProjectionASTFlattenASTIdentityASTIndexASTIndexExpressionASTKeyValPairASTLiteralASTMultiSelectHashASTMultiSelectListASTOrExpressionASTAndExpressionASTNotExpressionASTPipeASTProjectionASTSubexpressionASTSliceASTValueProjection"
|
|
||||||
|
|
||||||
var _astNodeType_index = [...]uint16{0, 8, 21, 35, 44, 65, 73, 92, 102, 113, 121, 139, 152, 162, 180, 198, 213, 229, 245, 252, 265, 281, 289, 307}
|
|
||||||
|
|
||||||
func (i astNodeType) String() string {
|
|
||||||
if i < 0 || i >= astNodeType(len(_astNodeType_index)-1) {
|
|
||||||
return fmt.Sprintf("astNodeType(%d)", i)
|
|
||||||
}
|
|
||||||
return _astNodeType_name[_astNodeType_index[i]:_astNodeType_index[i+1]]
|
|
||||||
}
|
|
||||||
840
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go
generated
vendored
840
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/functions.go
generated
vendored
@@ -1,840 +0,0 @@
|
|||||||
package jmespath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"math"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
type jpFunction func(arguments []interface{}) (interface{}, error)
|
|
||||||
|
|
||||||
type jpType string
|
|
||||||
|
|
||||||
const (
|
|
||||||
jpUnknown jpType = "unknown"
|
|
||||||
jpNumber jpType = "number"
|
|
||||||
jpString jpType = "string"
|
|
||||||
jpArray jpType = "array"
|
|
||||||
jpObject jpType = "object"
|
|
||||||
jpArrayNumber jpType = "array[number]"
|
|
||||||
jpArrayString jpType = "array[string]"
|
|
||||||
jpExpref jpType = "expref"
|
|
||||||
jpAny jpType = "any"
|
|
||||||
)
|
|
||||||
|
|
||||||
type functionEntry struct {
|
|
||||||
name string
|
|
||||||
arguments []argSpec
|
|
||||||
handler jpFunction
|
|
||||||
hasExpRef bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type argSpec struct {
|
|
||||||
types []jpType
|
|
||||||
variadic bool
|
|
||||||
}
|
|
||||||
|
|
||||||
type byExprString struct {
|
|
||||||
intr *treeInterpreter
|
|
||||||
node ASTNode
|
|
||||||
items []interface{}
|
|
||||||
hasError bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *byExprString) Len() int {
|
|
||||||
return len(a.items)
|
|
||||||
}
|
|
||||||
func (a *byExprString) Swap(i, j int) {
|
|
||||||
a.items[i], a.items[j] = a.items[j], a.items[i]
|
|
||||||
}
|
|
||||||
func (a *byExprString) Less(i, j int) bool {
|
|
||||||
first, err := a.intr.Execute(a.node, a.items[i])
|
|
||||||
if err != nil {
|
|
||||||
a.hasError = true
|
|
||||||
// Return a dummy value.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
ith, ok := first.(string)
|
|
||||||
if !ok {
|
|
||||||
a.hasError = true
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
second, err := a.intr.Execute(a.node, a.items[j])
|
|
||||||
if err != nil {
|
|
||||||
a.hasError = true
|
|
||||||
// Return a dummy value.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
jth, ok := second.(string)
|
|
||||||
if !ok {
|
|
||||||
a.hasError = true
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return ith < jth
|
|
||||||
}
|
|
||||||
|
|
||||||
type byExprFloat struct {
|
|
||||||
intr *treeInterpreter
|
|
||||||
node ASTNode
|
|
||||||
items []interface{}
|
|
||||||
hasError bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *byExprFloat) Len() int {
|
|
||||||
return len(a.items)
|
|
||||||
}
|
|
||||||
func (a *byExprFloat) Swap(i, j int) {
|
|
||||||
a.items[i], a.items[j] = a.items[j], a.items[i]
|
|
||||||
}
|
|
||||||
func (a *byExprFloat) Less(i, j int) bool {
|
|
||||||
first, err := a.intr.Execute(a.node, a.items[i])
|
|
||||||
if err != nil {
|
|
||||||
a.hasError = true
|
|
||||||
// Return a dummy value.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
ith, ok := first.(float64)
|
|
||||||
if !ok {
|
|
||||||
a.hasError = true
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
second, err := a.intr.Execute(a.node, a.items[j])
|
|
||||||
if err != nil {
|
|
||||||
a.hasError = true
|
|
||||||
// Return a dummy value.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
jth, ok := second.(float64)
|
|
||||||
if !ok {
|
|
||||||
a.hasError = true
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return ith < jth
|
|
||||||
}
|
|
||||||
|
|
||||||
type functionCaller struct {
|
|
||||||
functionTable map[string]functionEntry
|
|
||||||
}
|
|
||||||
|
|
||||||
func newFunctionCaller() *functionCaller {
|
|
||||||
caller := &functionCaller{}
|
|
||||||
caller.functionTable = map[string]functionEntry{
|
|
||||||
"length": functionEntry{
|
|
||||||
name: "length",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpString, jpArray, jpObject}},
|
|
||||||
},
|
|
||||||
handler: jpfLength,
|
|
||||||
},
|
|
||||||
"starts_with": functionEntry{
|
|
||||||
name: "starts_with",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpString}},
|
|
||||||
argSpec{types: []jpType{jpString}},
|
|
||||||
},
|
|
||||||
handler: jpfStartsWith,
|
|
||||||
},
|
|
||||||
"abs": functionEntry{
|
|
||||||
name: "abs",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpNumber}},
|
|
||||||
},
|
|
||||||
handler: jpfAbs,
|
|
||||||
},
|
|
||||||
"avg": functionEntry{
|
|
||||||
name: "avg",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArrayNumber}},
|
|
||||||
},
|
|
||||||
handler: jpfAvg,
|
|
||||||
},
|
|
||||||
"ceil": functionEntry{
|
|
||||||
name: "ceil",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpNumber}},
|
|
||||||
},
|
|
||||||
handler: jpfCeil,
|
|
||||||
},
|
|
||||||
"contains": functionEntry{
|
|
||||||
name: "contains",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArray, jpString}},
|
|
||||||
argSpec{types: []jpType{jpAny}},
|
|
||||||
},
|
|
||||||
handler: jpfContains,
|
|
||||||
},
|
|
||||||
"ends_with": functionEntry{
|
|
||||||
name: "ends_with",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpString}},
|
|
||||||
argSpec{types: []jpType{jpString}},
|
|
||||||
},
|
|
||||||
handler: jpfEndsWith,
|
|
||||||
},
|
|
||||||
"floor": functionEntry{
|
|
||||||
name: "floor",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpNumber}},
|
|
||||||
},
|
|
||||||
handler: jpfFloor,
|
|
||||||
},
|
|
||||||
"map": functionEntry{
|
|
||||||
name: "amp",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpExpref}},
|
|
||||||
argSpec{types: []jpType{jpArray}},
|
|
||||||
},
|
|
||||||
handler: jpfMap,
|
|
||||||
hasExpRef: true,
|
|
||||||
},
|
|
||||||
"max": functionEntry{
|
|
||||||
name: "max",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArrayNumber, jpArrayString}},
|
|
||||||
},
|
|
||||||
handler: jpfMax,
|
|
||||||
},
|
|
||||||
"merge": functionEntry{
|
|
||||||
name: "merge",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpObject}, variadic: true},
|
|
||||||
},
|
|
||||||
handler: jpfMerge,
|
|
||||||
},
|
|
||||||
"max_by": functionEntry{
|
|
||||||
name: "max_by",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArray}},
|
|
||||||
argSpec{types: []jpType{jpExpref}},
|
|
||||||
},
|
|
||||||
handler: jpfMaxBy,
|
|
||||||
hasExpRef: true,
|
|
||||||
},
|
|
||||||
"sum": functionEntry{
|
|
||||||
name: "sum",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArrayNumber}},
|
|
||||||
},
|
|
||||||
handler: jpfSum,
|
|
||||||
},
|
|
||||||
"min": functionEntry{
|
|
||||||
name: "min",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArrayNumber, jpArrayString}},
|
|
||||||
},
|
|
||||||
handler: jpfMin,
|
|
||||||
},
|
|
||||||
"min_by": functionEntry{
|
|
||||||
name: "min_by",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArray}},
|
|
||||||
argSpec{types: []jpType{jpExpref}},
|
|
||||||
},
|
|
||||||
handler: jpfMinBy,
|
|
||||||
hasExpRef: true,
|
|
||||||
},
|
|
||||||
"type": functionEntry{
|
|
||||||
name: "type",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpAny}},
|
|
||||||
},
|
|
||||||
handler: jpfType,
|
|
||||||
},
|
|
||||||
"keys": functionEntry{
|
|
||||||
name: "keys",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpObject}},
|
|
||||||
},
|
|
||||||
handler: jpfKeys,
|
|
||||||
},
|
|
||||||
"values": functionEntry{
|
|
||||||
name: "values",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpObject}},
|
|
||||||
},
|
|
||||||
handler: jpfValues,
|
|
||||||
},
|
|
||||||
"sort": functionEntry{
|
|
||||||
name: "sort",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArrayString, jpArrayNumber}},
|
|
||||||
},
|
|
||||||
handler: jpfSort,
|
|
||||||
},
|
|
||||||
"sort_by": functionEntry{
|
|
||||||
name: "sort_by",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArray}},
|
|
||||||
argSpec{types: []jpType{jpExpref}},
|
|
||||||
},
|
|
||||||
handler: jpfSortBy,
|
|
||||||
hasExpRef: true,
|
|
||||||
},
|
|
||||||
"join": functionEntry{
|
|
||||||
name: "join",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpString}},
|
|
||||||
argSpec{types: []jpType{jpArrayString}},
|
|
||||||
},
|
|
||||||
handler: jpfJoin,
|
|
||||||
},
|
|
||||||
"reverse": functionEntry{
|
|
||||||
name: "reverse",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpArray, jpString}},
|
|
||||||
},
|
|
||||||
handler: jpfReverse,
|
|
||||||
},
|
|
||||||
"to_array": functionEntry{
|
|
||||||
name: "to_array",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpAny}},
|
|
||||||
},
|
|
||||||
handler: jpfToArray,
|
|
||||||
},
|
|
||||||
"to_string": functionEntry{
|
|
||||||
name: "to_string",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpAny}},
|
|
||||||
},
|
|
||||||
handler: jpfToString,
|
|
||||||
},
|
|
||||||
"to_number": functionEntry{
|
|
||||||
name: "to_number",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpAny}},
|
|
||||||
},
|
|
||||||
handler: jpfToNumber,
|
|
||||||
},
|
|
||||||
"not_null": functionEntry{
|
|
||||||
name: "not_null",
|
|
||||||
arguments: []argSpec{
|
|
||||||
argSpec{types: []jpType{jpAny}, variadic: true},
|
|
||||||
},
|
|
||||||
handler: jpfNotNull,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
return caller
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *functionEntry) resolveArgs(arguments []interface{}) ([]interface{}, error) {
|
|
||||||
if len(e.arguments) == 0 {
|
|
||||||
return arguments, nil
|
|
||||||
}
|
|
||||||
if !e.arguments[len(e.arguments)-1].variadic {
|
|
||||||
if len(e.arguments) != len(arguments) {
|
|
||||||
return nil, errors.New("incorrect number of args")
|
|
||||||
}
|
|
||||||
for i, spec := range e.arguments {
|
|
||||||
userArg := arguments[i]
|
|
||||||
err := spec.typeCheck(userArg)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return arguments, nil
|
|
||||||
}
|
|
||||||
if len(arguments) < len(e.arguments) {
|
|
||||||
return nil, errors.New("Invalid arity.")
|
|
||||||
}
|
|
||||||
return arguments, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (a *argSpec) typeCheck(arg interface{}) error {
|
|
||||||
for _, t := range a.types {
|
|
||||||
switch t {
|
|
||||||
case jpNumber:
|
|
||||||
if _, ok := arg.(float64); ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case jpString:
|
|
||||||
if _, ok := arg.(string); ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case jpArray:
|
|
||||||
if _, ok := arg.([]interface{}); ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case jpObject:
|
|
||||||
if _, ok := arg.(map[string]interface{}); ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case jpArrayNumber:
|
|
||||||
if _, ok := toArrayNum(arg); ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case jpArrayString:
|
|
||||||
if _, ok := toArrayStr(arg); ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
case jpAny:
|
|
||||||
return nil
|
|
||||||
case jpExpref:
|
|
||||||
if _, ok := arg.(expRef); ok {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return fmt.Errorf("Invalid type for: %v, expected: %#v", arg, a.types)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *functionCaller) CallFunction(name string, arguments []interface{}, intr *treeInterpreter) (interface{}, error) {
|
|
||||||
entry, ok := f.functionTable[name]
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("unknown function: " + name)
|
|
||||||
}
|
|
||||||
resolvedArgs, err := entry.resolveArgs(arguments)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if entry.hasExpRef {
|
|
||||||
var extra []interface{}
|
|
||||||
extra = append(extra, intr)
|
|
||||||
resolvedArgs = append(extra, resolvedArgs...)
|
|
||||||
}
|
|
||||||
return entry.handler(resolvedArgs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// jpfAbs implements the JMESPath abs() function for a single number.
func jpfAbs(arguments []interface{}) (interface{}, error) {
	value := arguments[0].(float64)
	return math.Abs(value), nil
}
|
|
||||||
|
|
||||||
// jpfLength implements length(): rune count for strings, element count for
// arrays, and key count for objects. Any other type is an error.
func jpfLength(arguments []interface{}) (interface{}, error) {
	switch v := arguments[0].(type) {
	case string:
		// Length of a string is measured in runes, not bytes.
		return float64(utf8.RuneCountInString(v)), nil
	case []interface{}:
		return float64(len(v)), nil
	case map[string]interface{}:
		return float64(len(v)), nil
	}
	return nil, errors.New("could not compute length()")
}
|
|
||||||
|
|
||||||
// jpfStartsWith implements starts_with(subject, prefix) for strings.
func jpfStartsWith(arguments []interface{}) (interface{}, error) {
	subject := arguments[0].(string)
	wanted := arguments[1].(string)
	return strings.HasPrefix(subject, wanted), nil
}
|
|
||||||
|
|
||||||
// jpfAvg implements avg() over an array of numbers. The argument has
// already been type checked, so the assertions cannot fail.
// NOTE(review): an empty array divides 0 by 0 and yields NaN — confirm
// against the JMESPath spec, which returns null for avg of an empty array.
func jpfAvg(arguments []interface{}) (interface{}, error) {
	nums := arguments[0].([]interface{})
	total := 0.0
	for _, item := range nums {
		total += item.(float64)
	}
	return total / float64(len(nums)), nil
}
|
|
||||||
// jpfCeil implements ceil() for a single number.
func jpfCeil(arguments []interface{}) (interface{}, error) {
	n := arguments[0].(float64)
	return math.Ceil(n), nil
}
|
|
||||||
// jpfContains implements contains(subject, search). For a string subject it
// is a substring test; for an array subject it is an equality membership
// test over the elements.
func jpfContains(arguments []interface{}) (interface{}, error) {
	search := arguments[0]
	el := arguments[1]
	if searchStr, ok := search.(string); ok {
		if elStr, ok := el.(string); ok {
			// Idiom fix: strings.Contains instead of Index(...) != -1.
			return strings.Contains(searchStr, elStr), nil
		}
		// A non-string element can never be contained in a string.
		return false, nil
	}
	// Otherwise this is a generic contains for []interface{}.
	general := search.([]interface{})
	for _, item := range general {
		if item == el {
			return true, nil
		}
	}
	return false, nil
}
|
|
||||||
// jpfEndsWith implements ends_with(subject, suffix) for strings.
func jpfEndsWith(arguments []interface{}) (interface{}, error) {
	subject := arguments[0].(string)
	wanted := arguments[1].(string)
	return strings.HasSuffix(subject, wanted), nil
}
|
|
||||||
// jpfFloor implements floor() for a single number.
func jpfFloor(arguments []interface{}) (interface{}, error) {
	n := arguments[0].(float64)
	return math.Floor(n), nil
}
|
|
||||||
func jpfMap(arguments []interface{}) (interface{}, error) {
|
|
||||||
intr := arguments[0].(*treeInterpreter)
|
|
||||||
exp := arguments[1].(expRef)
|
|
||||||
node := exp.ref
|
|
||||||
arr := arguments[2].([]interface{})
|
|
||||||
mapped := make([]interface{}, 0, len(arr))
|
|
||||||
for _, value := range arr {
|
|
||||||
current, err := intr.Execute(node, value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
mapped = append(mapped, current)
|
|
||||||
}
|
|
||||||
return mapped, nil
|
|
||||||
}
|
|
||||||
func jpfMax(arguments []interface{}) (interface{}, error) {
|
|
||||||
if items, ok := toArrayNum(arguments[0]); ok {
|
|
||||||
if len(items) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if len(items) == 1 {
|
|
||||||
return items[0], nil
|
|
||||||
}
|
|
||||||
best := items[0]
|
|
||||||
for _, item := range items[1:] {
|
|
||||||
if item > best {
|
|
||||||
best = item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return best, nil
|
|
||||||
}
|
|
||||||
// Otherwise we're dealing with a max() of strings.
|
|
||||||
items, _ := toArrayStr(arguments[0])
|
|
||||||
if len(items) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if len(items) == 1 {
|
|
||||||
return items[0], nil
|
|
||||||
}
|
|
||||||
best := items[0]
|
|
||||||
for _, item := range items[1:] {
|
|
||||||
if item > best {
|
|
||||||
best = item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return best, nil
|
|
||||||
}
|
|
||||||
// jpfMerge implements merge(): combines every object argument into one map,
// with later arguments overwriting earlier keys.
func jpfMerge(arguments []interface{}) (interface{}, error) {
	merged := make(map[string]interface{})
	for _, argument := range arguments {
		obj := argument.(map[string]interface{})
		for k, v := range obj {
			merged[k] = v
		}
	}
	return merged, nil
}
|
|
||||||
func jpfMaxBy(arguments []interface{}) (interface{}, error) {
|
|
||||||
intr := arguments[0].(*treeInterpreter)
|
|
||||||
arr := arguments[1].([]interface{})
|
|
||||||
exp := arguments[2].(expRef)
|
|
||||||
node := exp.ref
|
|
||||||
if len(arr) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
} else if len(arr) == 1 {
|
|
||||||
return arr[0], nil
|
|
||||||
}
|
|
||||||
start, err := intr.Execute(node, arr[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
switch t := start.(type) {
|
|
||||||
case float64:
|
|
||||||
bestVal := t
|
|
||||||
bestItem := arr[0]
|
|
||||||
for _, item := range arr[1:] {
|
|
||||||
result, err := intr.Execute(node, item)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
current, ok := result.(float64)
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("invalid type, must be number")
|
|
||||||
}
|
|
||||||
if current > bestVal {
|
|
||||||
bestVal = current
|
|
||||||
bestItem = item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return bestItem, nil
|
|
||||||
case string:
|
|
||||||
bestVal := t
|
|
||||||
bestItem := arr[0]
|
|
||||||
for _, item := range arr[1:] {
|
|
||||||
result, err := intr.Execute(node, item)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
current, ok := result.(string)
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("invalid type, must be string")
|
|
||||||
}
|
|
||||||
if current > bestVal {
|
|
||||||
bestVal = current
|
|
||||||
bestItem = item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return bestItem, nil
|
|
||||||
default:
|
|
||||||
return nil, errors.New("invalid type, must be number of string")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
func jpfSum(arguments []interface{}) (interface{}, error) {
|
|
||||||
items, _ := toArrayNum(arguments[0])
|
|
||||||
sum := 0.0
|
|
||||||
for _, item := range items {
|
|
||||||
sum += item
|
|
||||||
}
|
|
||||||
return sum, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func jpfMin(arguments []interface{}) (interface{}, error) {
|
|
||||||
if items, ok := toArrayNum(arguments[0]); ok {
|
|
||||||
if len(items) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if len(items) == 1 {
|
|
||||||
return items[0], nil
|
|
||||||
}
|
|
||||||
best := items[0]
|
|
||||||
for _, item := range items[1:] {
|
|
||||||
if item < best {
|
|
||||||
best = item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return best, nil
|
|
||||||
}
|
|
||||||
items, _ := toArrayStr(arguments[0])
|
|
||||||
if len(items) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
if len(items) == 1 {
|
|
||||||
return items[0], nil
|
|
||||||
}
|
|
||||||
best := items[0]
|
|
||||||
for _, item := range items[1:] {
|
|
||||||
if item < best {
|
|
||||||
best = item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return best, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func jpfMinBy(arguments []interface{}) (interface{}, error) {
|
|
||||||
intr := arguments[0].(*treeInterpreter)
|
|
||||||
arr := arguments[1].([]interface{})
|
|
||||||
exp := arguments[2].(expRef)
|
|
||||||
node := exp.ref
|
|
||||||
if len(arr) == 0 {
|
|
||||||
return nil, nil
|
|
||||||
} else if len(arr) == 1 {
|
|
||||||
return arr[0], nil
|
|
||||||
}
|
|
||||||
start, err := intr.Execute(node, arr[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if t, ok := start.(float64); ok {
|
|
||||||
bestVal := t
|
|
||||||
bestItem := arr[0]
|
|
||||||
for _, item := range arr[1:] {
|
|
||||||
result, err := intr.Execute(node, item)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
current, ok := result.(float64)
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("invalid type, must be number")
|
|
||||||
}
|
|
||||||
if current < bestVal {
|
|
||||||
bestVal = current
|
|
||||||
bestItem = item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return bestItem, nil
|
|
||||||
} else if t, ok := start.(string); ok {
|
|
||||||
bestVal := t
|
|
||||||
bestItem := arr[0]
|
|
||||||
for _, item := range arr[1:] {
|
|
||||||
result, err := intr.Execute(node, item)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
current, ok := result.(string)
|
|
||||||
if !ok {
|
|
||||||
return nil, errors.New("invalid type, must be string")
|
|
||||||
}
|
|
||||||
if current < bestVal {
|
|
||||||
bestVal = current
|
|
||||||
bestItem = item
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return bestItem, nil
|
|
||||||
} else {
|
|
||||||
return nil, errors.New("invalid type, must be number of string")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// jpfType implements type(): returns the JMESPath type name of the
// argument as a string.
func jpfType(arguments []interface{}) (interface{}, error) {
	switch arguments[0].(type) {
	case float64:
		return "number", nil
	case string:
		return "string", nil
	case []interface{}:
		return "array", nil
	case map[string]interface{}:
		return "object", nil
	case nil:
		return "null", nil
	case bool:
		return "boolean", nil
	}
	return nil, errors.New("unknown type")
}
|
|
||||||
// jpfKeys implements keys(): the object's keys in unspecified (map
// iteration) order.
func jpfKeys(arguments []interface{}) (interface{}, error) {
	obj := arguments[0].(map[string]interface{})
	keys := make([]interface{}, 0, len(obj))
	for k := range obj {
		keys = append(keys, k)
	}
	return keys, nil
}
|
|
||||||
// jpfValues implements values(): the object's values in unspecified (map
// iteration) order.
func jpfValues(arguments []interface{}) (interface{}, error) {
	obj := arguments[0].(map[string]interface{})
	vals := make([]interface{}, 0, len(obj))
	for _, v := range obj {
		vals = append(vals, v)
	}
	return vals, nil
}
|
|
||||||
func jpfSort(arguments []interface{}) (interface{}, error) {
|
|
||||||
if items, ok := toArrayNum(arguments[0]); ok {
|
|
||||||
d := sort.Float64Slice(items)
|
|
||||||
sort.Stable(d)
|
|
||||||
final := make([]interface{}, len(d))
|
|
||||||
for i, val := range d {
|
|
||||||
final[i] = val
|
|
||||||
}
|
|
||||||
return final, nil
|
|
||||||
}
|
|
||||||
// Otherwise we're dealing with sort()'ing strings.
|
|
||||||
items, _ := toArrayStr(arguments[0])
|
|
||||||
d := sort.StringSlice(items)
|
|
||||||
sort.Stable(d)
|
|
||||||
final := make([]interface{}, len(d))
|
|
||||||
for i, val := range d {
|
|
||||||
final[i] = val
|
|
||||||
}
|
|
||||||
return final, nil
|
|
||||||
}
|
|
||||||
func jpfSortBy(arguments []interface{}) (interface{}, error) {
|
|
||||||
intr := arguments[0].(*treeInterpreter)
|
|
||||||
arr := arguments[1].([]interface{})
|
|
||||||
exp := arguments[2].(expRef)
|
|
||||||
node := exp.ref
|
|
||||||
if len(arr) == 0 {
|
|
||||||
return arr, nil
|
|
||||||
} else if len(arr) == 1 {
|
|
||||||
return arr, nil
|
|
||||||
}
|
|
||||||
start, err := intr.Execute(node, arr[0])
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if _, ok := start.(float64); ok {
|
|
||||||
sortable := &byExprFloat{intr, node, arr, false}
|
|
||||||
sort.Stable(sortable)
|
|
||||||
if sortable.hasError {
|
|
||||||
return nil, errors.New("error in sort_by comparison")
|
|
||||||
}
|
|
||||||
return arr, nil
|
|
||||||
} else if _, ok := start.(string); ok {
|
|
||||||
sortable := &byExprString{intr, node, arr, false}
|
|
||||||
sort.Stable(sortable)
|
|
||||||
if sortable.hasError {
|
|
||||||
return nil, errors.New("error in sort_by comparison")
|
|
||||||
}
|
|
||||||
return arr, nil
|
|
||||||
} else {
|
|
||||||
return nil, errors.New("invalid type, must be number of string")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// jpfJoin implements join(separator, array-of-strings).
func jpfJoin(arguments []interface{}) (interface{}, error) {
	separator := arguments[0].(string)
	// We can't just do arguments[1].([]string); each element must be
	// converted to a string individually.
	elements := arguments[1].([]interface{})
	parts := make([]string, 0, len(elements))
	for _, element := range elements {
		parts = append(parts, element.(string))
	}
	return strings.Join(parts, separator), nil
}
|
|
||||||
// jpfReverse implements reverse() for a string (rune-wise) or an array.
func jpfReverse(arguments []interface{}) (interface{}, error) {
	if s, ok := arguments[0].(string); ok {
		// Reverse runes, not bytes, so multi-byte characters survive.
		runes := []rune(s)
		for lo, hi := 0, len(runes)-1; lo < hi; lo, hi = lo+1, hi-1 {
			runes[lo], runes[hi] = runes[hi], runes[lo]
		}
		return string(runes), nil
	}
	items := arguments[0].([]interface{})
	n := len(items)
	out := make([]interface{}, n)
	for i, item := range items {
		out[n-1-i] = item
	}
	return out, nil
}
|
|
||||||
// jpfToArray implements to_array(): arrays pass through unchanged; any
// other value is wrapped in a single-element array.
func jpfToArray(arguments []interface{}) (interface{}, error) {
	if _, isArr := arguments[0].([]interface{}); isArr {
		return arguments[0], nil
	}
	// Full slice expression caps the capacity so callers can't append
	// into the arguments backing array.
	return arguments[:1:1], nil
}
|
|
||||||
// jpfToString implements to_string(): strings pass through; everything else
// is rendered as its JSON encoding.
func jpfToString(arguments []interface{}) (interface{}, error) {
	if s, isStr := arguments[0].(string); isStr {
		return s, nil
	}
	encoded, err := json.Marshal(arguments[0])
	if err != nil {
		return nil, err
	}
	return string(encoded), nil
}
|
|
||||||
// jpfToNumber implements to_number(): numbers pass through, parseable
// strings are converted, and every other JSON type (including unparseable
// strings) yields null.
func jpfToNumber(arguments []interface{}) (interface{}, error) {
	switch v := arguments[0].(type) {
	case float64:
		return v, nil
	case string:
		parsed, err := strconv.ParseFloat(v, 64)
		if err != nil {
			// Unparseable strings are null, not an error.
			return nil, nil
		}
		return parsed, nil
	case []interface{}, map[string]interface{}, nil, bool:
		return nil, nil
	}
	return nil, errors.New("unknown type")
}
|
|
||||||
// jpfNotNull implements not_null(): the first non-null argument, or null if
// every argument is null.
func jpfNotNull(arguments []interface{}) (interface{}, error) {
	for _, candidate := range arguments {
		if candidate != nil {
			return candidate, nil
		}
	}
	return nil, nil
}
|
|
||||||
418
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go
generated
vendored
418
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/interpreter.go
generated
vendored
@@ -1,418 +0,0 @@
|
|||||||
package jmespath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"reflect"
|
|
||||||
"unicode"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
/* This is a tree based interpreter. It walks the AST and directly
interprets the AST to search through a JSON document.
*/

type treeInterpreter struct {
	// fCall dispatches calls to the built-in JMESPath functions.
	fCall *functionCaller
}
|
|
||||||
|
|
||||||
func newInterpreter() *treeInterpreter {
|
|
||||||
interpreter := treeInterpreter{}
|
|
||||||
interpreter.fCall = newFunctionCaller()
|
|
||||||
return &interpreter
|
|
||||||
}
|
|
||||||
|
|
||||||
// expRef wraps an AST subtree produced by an expression-reference (&expr)
// so it can be passed around as a first-class value and later evaluated by
// functions such as map(), sort_by(), max_by(), and min_by().
type expRef struct {
	ref ASTNode
}
|
|
||||||
|
|
||||||
// Execute takes an ASTNode and input data and interprets the AST directly.
|
|
||||||
// It will produce the result of applying the JMESPath expression associated
|
|
||||||
// with the ASTNode to the input data "value".
|
|
||||||
func (intr *treeInterpreter) Execute(node ASTNode, value interface{}) (interface{}, error) {
|
|
||||||
switch node.nodeType {
|
|
||||||
case ASTComparator:
|
|
||||||
left, err := intr.Execute(node.children[0], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
right, err := intr.Execute(node.children[1], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
switch node.value {
|
|
||||||
case tEQ:
|
|
||||||
return objsEqual(left, right), nil
|
|
||||||
case tNE:
|
|
||||||
return !objsEqual(left, right), nil
|
|
||||||
}
|
|
||||||
leftNum, ok := left.(float64)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
rightNum, ok := right.(float64)
|
|
||||||
if !ok {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
switch node.value {
|
|
||||||
case tGT:
|
|
||||||
return leftNum > rightNum, nil
|
|
||||||
case tGTE:
|
|
||||||
return leftNum >= rightNum, nil
|
|
||||||
case tLT:
|
|
||||||
return leftNum < rightNum, nil
|
|
||||||
case tLTE:
|
|
||||||
return leftNum <= rightNum, nil
|
|
||||||
}
|
|
||||||
case ASTExpRef:
|
|
||||||
return expRef{ref: node.children[0]}, nil
|
|
||||||
case ASTFunctionExpression:
|
|
||||||
resolvedArgs := []interface{}{}
|
|
||||||
for _, arg := range node.children {
|
|
||||||
current, err := intr.Execute(arg, value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
resolvedArgs = append(resolvedArgs, current)
|
|
||||||
}
|
|
||||||
return intr.fCall.CallFunction(node.value.(string), resolvedArgs, intr)
|
|
||||||
case ASTField:
|
|
||||||
if m, ok := value.(map[string]interface{}); ok {
|
|
||||||
key := node.value.(string)
|
|
||||||
return m[key], nil
|
|
||||||
}
|
|
||||||
return intr.fieldFromStruct(node.value.(string), value)
|
|
||||||
case ASTFilterProjection:
|
|
||||||
left, err := intr.Execute(node.children[0], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
sliceType, ok := left.([]interface{})
|
|
||||||
if !ok {
|
|
||||||
if isSliceType(left) {
|
|
||||||
return intr.filterProjectionWithReflection(node, left)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
compareNode := node.children[2]
|
|
||||||
collected := []interface{}{}
|
|
||||||
for _, element := range sliceType {
|
|
||||||
result, err := intr.Execute(compareNode, element)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if !isFalse(result) {
|
|
||||||
current, err := intr.Execute(node.children[1], element)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if current != nil {
|
|
||||||
collected = append(collected, current)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return collected, nil
|
|
||||||
case ASTFlatten:
|
|
||||||
left, err := intr.Execute(node.children[0], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
sliceType, ok := left.([]interface{})
|
|
||||||
if !ok {
|
|
||||||
// If we can't type convert to []interface{}, there's
|
|
||||||
// a chance this could still work via reflection if we're
|
|
||||||
// dealing with user provided types.
|
|
||||||
if isSliceType(left) {
|
|
||||||
return intr.flattenWithReflection(left)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
flattened := []interface{}{}
|
|
||||||
for _, element := range sliceType {
|
|
||||||
if elementSlice, ok := element.([]interface{}); ok {
|
|
||||||
flattened = append(flattened, elementSlice...)
|
|
||||||
} else if isSliceType(element) {
|
|
||||||
reflectFlat := []interface{}{}
|
|
||||||
v := reflect.ValueOf(element)
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
reflectFlat = append(reflectFlat, v.Index(i).Interface())
|
|
||||||
}
|
|
||||||
flattened = append(flattened, reflectFlat...)
|
|
||||||
} else {
|
|
||||||
flattened = append(flattened, element)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return flattened, nil
|
|
||||||
case ASTIdentity, ASTCurrentNode:
|
|
||||||
return value, nil
|
|
||||||
case ASTIndex:
|
|
||||||
if sliceType, ok := value.([]interface{}); ok {
|
|
||||||
index := node.value.(int)
|
|
||||||
if index < 0 {
|
|
||||||
index += len(sliceType)
|
|
||||||
}
|
|
||||||
if index < len(sliceType) && index >= 0 {
|
|
||||||
return sliceType[index], nil
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
// Otherwise try via reflection.
|
|
||||||
rv := reflect.ValueOf(value)
|
|
||||||
if rv.Kind() == reflect.Slice {
|
|
||||||
index := node.value.(int)
|
|
||||||
if index < 0 {
|
|
||||||
index += rv.Len()
|
|
||||||
}
|
|
||||||
if index < rv.Len() && index >= 0 {
|
|
||||||
v := rv.Index(index)
|
|
||||||
return v.Interface(), nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
case ASTKeyValPair:
|
|
||||||
return intr.Execute(node.children[0], value)
|
|
||||||
case ASTLiteral:
|
|
||||||
return node.value, nil
|
|
||||||
case ASTMultiSelectHash:
|
|
||||||
if value == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
collected := make(map[string]interface{})
|
|
||||||
for _, child := range node.children {
|
|
||||||
current, err := intr.Execute(child, value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
key := child.value.(string)
|
|
||||||
collected[key] = current
|
|
||||||
}
|
|
||||||
return collected, nil
|
|
||||||
case ASTMultiSelectList:
|
|
||||||
if value == nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
collected := []interface{}{}
|
|
||||||
for _, child := range node.children {
|
|
||||||
current, err := intr.Execute(child, value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
collected = append(collected, current)
|
|
||||||
}
|
|
||||||
return collected, nil
|
|
||||||
case ASTOrExpression:
|
|
||||||
matched, err := intr.Execute(node.children[0], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if isFalse(matched) {
|
|
||||||
matched, err = intr.Execute(node.children[1], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return matched, nil
|
|
||||||
case ASTAndExpression:
|
|
||||||
matched, err := intr.Execute(node.children[0], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if isFalse(matched) {
|
|
||||||
return matched, nil
|
|
||||||
}
|
|
||||||
return intr.Execute(node.children[1], value)
|
|
||||||
case ASTNotExpression:
|
|
||||||
matched, err := intr.Execute(node.children[0], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if isFalse(matched) {
|
|
||||||
return true, nil
|
|
||||||
}
|
|
||||||
return false, nil
|
|
||||||
case ASTPipe:
|
|
||||||
result := value
|
|
||||||
var err error
|
|
||||||
for _, child := range node.children {
|
|
||||||
result, err = intr.Execute(child, result)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
case ASTProjection:
|
|
||||||
left, err := intr.Execute(node.children[0], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
sliceType, ok := left.([]interface{})
|
|
||||||
if !ok {
|
|
||||||
if isSliceType(left) {
|
|
||||||
return intr.projectWithReflection(node, left)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
collected := []interface{}{}
|
|
||||||
var current interface{}
|
|
||||||
for _, element := range sliceType {
|
|
||||||
current, err = intr.Execute(node.children[1], element)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if current != nil {
|
|
||||||
collected = append(collected, current)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return collected, nil
|
|
||||||
case ASTSubexpression, ASTIndexExpression:
|
|
||||||
left, err := intr.Execute(node.children[0], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return intr.Execute(node.children[1], left)
|
|
||||||
case ASTSlice:
|
|
||||||
sliceType, ok := value.([]interface{})
|
|
||||||
if !ok {
|
|
||||||
if isSliceType(value) {
|
|
||||||
return intr.sliceWithReflection(node, value)
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
parts := node.value.([]*int)
|
|
||||||
sliceParams := make([]sliceParam, 3)
|
|
||||||
for i, part := range parts {
|
|
||||||
if part != nil {
|
|
||||||
sliceParams[i].Specified = true
|
|
||||||
sliceParams[i].N = *part
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return slice(sliceType, sliceParams)
|
|
||||||
case ASTValueProjection:
|
|
||||||
left, err := intr.Execute(node.children[0], value)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
mapType, ok := left.(map[string]interface{})
|
|
||||||
if !ok {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
values := make([]interface{}, len(mapType))
|
|
||||||
for _, value := range mapType {
|
|
||||||
values = append(values, value)
|
|
||||||
}
|
|
||||||
collected := []interface{}{}
|
|
||||||
for _, element := range values {
|
|
||||||
current, err := intr.Execute(node.children[1], element)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if current != nil {
|
|
||||||
collected = append(collected, current)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return collected, nil
|
|
||||||
}
|
|
||||||
return nil, errors.New("Unknown AST node: " + node.nodeType.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (intr *treeInterpreter) fieldFromStruct(key string, value interface{}) (interface{}, error) {
|
|
||||||
rv := reflect.ValueOf(value)
|
|
||||||
first, n := utf8.DecodeRuneInString(key)
|
|
||||||
fieldName := string(unicode.ToUpper(first)) + key[n:]
|
|
||||||
if rv.Kind() == reflect.Struct {
|
|
||||||
v := rv.FieldByName(fieldName)
|
|
||||||
if !v.IsValid() {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return v.Interface(), nil
|
|
||||||
} else if rv.Kind() == reflect.Ptr {
|
|
||||||
// Handle multiple levels of indirection?
|
|
||||||
if rv.IsNil() {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
rv = rv.Elem()
|
|
||||||
v := rv.FieldByName(fieldName)
|
|
||||||
if !v.IsValid() {
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
return v.Interface(), nil
|
|
||||||
}
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (intr *treeInterpreter) flattenWithReflection(value interface{}) (interface{}, error) {
|
|
||||||
v := reflect.ValueOf(value)
|
|
||||||
flattened := []interface{}{}
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
element := v.Index(i).Interface()
|
|
||||||
if reflect.TypeOf(element).Kind() == reflect.Slice {
|
|
||||||
// Then insert the contents of the element
|
|
||||||
// slice into the flattened slice,
|
|
||||||
// i.e flattened = append(flattened, mySlice...)
|
|
||||||
elementV := reflect.ValueOf(element)
|
|
||||||
for j := 0; j < elementV.Len(); j++ {
|
|
||||||
flattened = append(
|
|
||||||
flattened, elementV.Index(j).Interface())
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
flattened = append(flattened, element)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return flattened, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (intr *treeInterpreter) sliceWithReflection(node ASTNode, value interface{}) (interface{}, error) {
|
|
||||||
v := reflect.ValueOf(value)
|
|
||||||
parts := node.value.([]*int)
|
|
||||||
sliceParams := make([]sliceParam, 3)
|
|
||||||
for i, part := range parts {
|
|
||||||
if part != nil {
|
|
||||||
sliceParams[i].Specified = true
|
|
||||||
sliceParams[i].N = *part
|
|
||||||
}
|
|
||||||
}
|
|
||||||
final := []interface{}{}
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
element := v.Index(i).Interface()
|
|
||||||
final = append(final, element)
|
|
||||||
}
|
|
||||||
return slice(final, sliceParams)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (intr *treeInterpreter) filterProjectionWithReflection(node ASTNode, value interface{}) (interface{}, error) {
|
|
||||||
compareNode := node.children[2]
|
|
||||||
collected := []interface{}{}
|
|
||||||
v := reflect.ValueOf(value)
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
element := v.Index(i).Interface()
|
|
||||||
result, err := intr.Execute(compareNode, element)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if !isFalse(result) {
|
|
||||||
current, err := intr.Execute(node.children[1], element)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if current != nil {
|
|
||||||
collected = append(collected, current)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return collected, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (intr *treeInterpreter) projectWithReflection(node ASTNode, value interface{}) (interface{}, error) {
|
|
||||||
collected := []interface{}{}
|
|
||||||
v := reflect.ValueOf(value)
|
|
||||||
for i := 0; i < v.Len(); i++ {
|
|
||||||
element := v.Index(i).Interface()
|
|
||||||
result, err := intr.Execute(node.children[1], element)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if result != nil {
|
|
||||||
collected = append(collected, result)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return collected, nil
|
|
||||||
}
|
|
||||||
420
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go
generated
vendored
420
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/lexer.go
generated
vendored
@@ -1,420 +0,0 @@
|
|||||||
package jmespath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"unicode/utf8"
|
|
||||||
)
|
|
||||||
|
|
||||||
// token is a single lexical token produced by the Lexer, recording its
// kind, raw text, and location within the source expression.
type token struct {
	tokenType tokType // Kind of token (tDot, tStar, tNumber, ...).
	value     string  // The literal text of the token.
	position  int     // Byte offset where the token starts.
	length    int     // Length of the token in bytes.
}

// tokType enumerates the kinds of token the lexer can produce.
type tokType int

// eof is the sentinel rune returned when the lexer runs past the end of
// the expression.
const eof = -1
|
|
||||||
|
|
||||||
// Lexer contains information about the expression being tokenized.
type Lexer struct {
	expression string       // The expression provided by the user.
	currentPos int          // The current position in the string.
	lastWidth  int          // The width (in bytes) of the current rune.
	buf        bytes.Buffer // Internal buffer used for building up values.
}
|
|
||||||
|
|
||||||
// SyntaxError is the main error used whenever a lexing or parsing error occurs.
type SyntaxError struct {
	msg        string // Error message displayed to user
	Expression string // Expression that generated a SyntaxError
	Offset     int    // The location in the string where the error occurred
}
|
|
||||||
|
|
||||||
// Error returns the error message, satisfying the error interface.
func (e SyntaxError) Error() string {
	// In the future, it would be good to underline the specific
	// location where the error occurred.
	return "SyntaxError: " + e.msg
}
|
|
||||||
|
|
||||||
// HighlightLocation will show where the syntax error occurred.
// It will place a "^" character on a line below the expression
// at the point where the syntax error occurred.
func (e SyntaxError) HighlightLocation() string {
	// Offset is a byte offset into Expression, so the caret is padded
	// with Offset spaces.
	return e.Expression + "\n" + strings.Repeat(" ", e.Offset) + "^"
}
|
|
||||||
|
|
||||||
//go:generate stringer -type=tokType
const (
	tUnknown tokType = iota
	// Single-character structural tokens.
	tStar
	tDot
	tFilter
	tFlatten
	tLparen
	tRparen
	tLbracket
	tRbracket
	tLbrace
	tRbrace
	tOr
	tPipe
	tNumber
	// Identifiers and separators.
	tUnquotedIdentifier
	tQuotedIdentifier
	tComma
	tColon
	// Comparison operators.
	tLT
	tLTE
	tGT
	tGTE
	tEQ
	tNE
	// Literals and expression references.
	tJSONLiteral
	tStringLiteral
	tCurrent
	tExpref
	// Boolean operators and end-of-input.
	tAnd
	tNot
	tEOF
)
|
|
||||||
|
|
||||||
var basicTokens = map[rune]tokType{
|
|
||||||
'.': tDot,
|
|
||||||
'*': tStar,
|
|
||||||
',': tComma,
|
|
||||||
':': tColon,
|
|
||||||
'{': tLbrace,
|
|
||||||
'}': tRbrace,
|
|
||||||
']': tRbracket, // tLbracket not included because it could be "[]"
|
|
||||||
'(': tLparen,
|
|
||||||
')': tRparen,
|
|
||||||
'@': tCurrent,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Bit mask for [a-zA-Z_] shifted down 64 bits to fit in a single uint64.
|
|
||||||
// When using this bitmask just be sure to shift the rune down 64 bits
|
|
||||||
// before checking against identifierStartBits.
|
|
||||||
const identifierStartBits uint64 = 576460745995190270
|
|
||||||
|
|
||||||
// Bit mask for [a-zA-Z0-9], 128 bits -> 2 uint64s.
|
|
||||||
var identifierTrailingBits = [2]uint64{287948901175001088, 576460745995190270}
|
|
||||||
|
|
||||||
var whiteSpace = map[rune]bool{
|
|
||||||
' ': true, '\t': true, '\n': true, '\r': true,
|
|
||||||
}
|
|
||||||
|
|
||||||
func (t token) String() string {
|
|
||||||
return fmt.Sprintf("Token{%+v, %s, %d, %d}",
|
|
||||||
t.tokenType, t.value, t.position, t.length)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewLexer creates a new JMESPath lexer.
|
|
||||||
func NewLexer() *Lexer {
|
|
||||||
lexer := Lexer{}
|
|
||||||
return &lexer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) next() rune {
|
|
||||||
if lexer.currentPos >= len(lexer.expression) {
|
|
||||||
lexer.lastWidth = 0
|
|
||||||
return eof
|
|
||||||
}
|
|
||||||
r, w := utf8.DecodeRuneInString(lexer.expression[lexer.currentPos:])
|
|
||||||
lexer.lastWidth = w
|
|
||||||
lexer.currentPos += w
|
|
||||||
return r
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) back() {
|
|
||||||
lexer.currentPos -= lexer.lastWidth
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) peek() rune {
|
|
||||||
t := lexer.next()
|
|
||||||
lexer.back()
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
// tokenize takes an expression and returns corresponding tokens.
|
|
||||||
func (lexer *Lexer) tokenize(expression string) ([]token, error) {
|
|
||||||
var tokens []token
|
|
||||||
lexer.expression = expression
|
|
||||||
lexer.currentPos = 0
|
|
||||||
lexer.lastWidth = 0
|
|
||||||
loop:
|
|
||||||
for {
|
|
||||||
r := lexer.next()
|
|
||||||
if identifierStartBits&(1<<(uint64(r)-64)) > 0 {
|
|
||||||
t := lexer.consumeUnquotedIdentifier()
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if val, ok := basicTokens[r]; ok {
|
|
||||||
// Basic single char token.
|
|
||||||
t := token{
|
|
||||||
tokenType: val,
|
|
||||||
value: string(r),
|
|
||||||
position: lexer.currentPos - lexer.lastWidth,
|
|
||||||
length: 1,
|
|
||||||
}
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '-' || (r >= '0' && r <= '9') {
|
|
||||||
t := lexer.consumeNumber()
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '[' {
|
|
||||||
t := lexer.consumeLBracket()
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '"' {
|
|
||||||
t, err := lexer.consumeQuotedIdentifier()
|
|
||||||
if err != nil {
|
|
||||||
return tokens, err
|
|
||||||
}
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '\'' {
|
|
||||||
t, err := lexer.consumeRawStringLiteral()
|
|
||||||
if err != nil {
|
|
||||||
return tokens, err
|
|
||||||
}
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '`' {
|
|
||||||
t, err := lexer.consumeLiteral()
|
|
||||||
if err != nil {
|
|
||||||
return tokens, err
|
|
||||||
}
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '|' {
|
|
||||||
t := lexer.matchOrElse(r, '|', tOr, tPipe)
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '<' {
|
|
||||||
t := lexer.matchOrElse(r, '=', tLTE, tLT)
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '>' {
|
|
||||||
t := lexer.matchOrElse(r, '=', tGTE, tGT)
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '!' {
|
|
||||||
t := lexer.matchOrElse(r, '=', tNE, tNot)
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '=' {
|
|
||||||
t := lexer.matchOrElse(r, '=', tEQ, tUnknown)
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == '&' {
|
|
||||||
t := lexer.matchOrElse(r, '&', tAnd, tExpref)
|
|
||||||
tokens = append(tokens, t)
|
|
||||||
} else if r == eof {
|
|
||||||
break loop
|
|
||||||
} else if _, ok := whiteSpace[r]; ok {
|
|
||||||
// Ignore whitespace
|
|
||||||
} else {
|
|
||||||
return tokens, lexer.syntaxError(fmt.Sprintf("Unknown char: %s", strconv.QuoteRuneToASCII(r)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
tokens = append(tokens, token{tEOF, "", len(lexer.expression), 0})
|
|
||||||
return tokens, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Consume characters until the ending rune "r" is reached.
|
|
||||||
// If the end of the expression is reached before seeing the
|
|
||||||
// terminating rune "r", then an error is returned.
|
|
||||||
// If no error occurs then the matching substring is returned.
|
|
||||||
// The returned string will not include the ending rune.
|
|
||||||
func (lexer *Lexer) consumeUntil(end rune) (string, error) {
|
|
||||||
start := lexer.currentPos
|
|
||||||
current := lexer.next()
|
|
||||||
for current != end && current != eof {
|
|
||||||
if current == '\\' && lexer.peek() != eof {
|
|
||||||
lexer.next()
|
|
||||||
}
|
|
||||||
current = lexer.next()
|
|
||||||
}
|
|
||||||
if lexer.lastWidth == 0 {
|
|
||||||
// Then we hit an EOF so we never reached the closing
|
|
||||||
// delimiter.
|
|
||||||
return "", SyntaxError{
|
|
||||||
msg: "Unclosed delimiter: " + string(end),
|
|
||||||
Expression: lexer.expression,
|
|
||||||
Offset: len(lexer.expression),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lexer.expression[start : lexer.currentPos-lexer.lastWidth], nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) consumeLiteral() (token, error) {
|
|
||||||
start := lexer.currentPos
|
|
||||||
value, err := lexer.consumeUntil('`')
|
|
||||||
if err != nil {
|
|
||||||
return token{}, err
|
|
||||||
}
|
|
||||||
value = strings.Replace(value, "\\`", "`", -1)
|
|
||||||
return token{
|
|
||||||
tokenType: tJSONLiteral,
|
|
||||||
value: value,
|
|
||||||
position: start,
|
|
||||||
length: len(value),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) consumeRawStringLiteral() (token, error) {
|
|
||||||
start := lexer.currentPos
|
|
||||||
currentIndex := start
|
|
||||||
current := lexer.next()
|
|
||||||
for current != '\'' && lexer.peek() != eof {
|
|
||||||
if current == '\\' && lexer.peek() == '\'' {
|
|
||||||
chunk := lexer.expression[currentIndex : lexer.currentPos-1]
|
|
||||||
lexer.buf.WriteString(chunk)
|
|
||||||
lexer.buf.WriteString("'")
|
|
||||||
lexer.next()
|
|
||||||
currentIndex = lexer.currentPos
|
|
||||||
}
|
|
||||||
current = lexer.next()
|
|
||||||
}
|
|
||||||
if lexer.lastWidth == 0 {
|
|
||||||
// Then we hit an EOF so we never reached the closing
|
|
||||||
// delimiter.
|
|
||||||
return token{}, SyntaxError{
|
|
||||||
msg: "Unclosed delimiter: '",
|
|
||||||
Expression: lexer.expression,
|
|
||||||
Offset: len(lexer.expression),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if currentIndex < lexer.currentPos {
|
|
||||||
lexer.buf.WriteString(lexer.expression[currentIndex : lexer.currentPos-1])
|
|
||||||
}
|
|
||||||
value := lexer.buf.String()
|
|
||||||
// Reset the buffer so it can reused again.
|
|
||||||
lexer.buf.Reset()
|
|
||||||
return token{
|
|
||||||
tokenType: tStringLiteral,
|
|
||||||
value: value,
|
|
||||||
position: start,
|
|
||||||
length: len(value),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) syntaxError(msg string) SyntaxError {
|
|
||||||
return SyntaxError{
|
|
||||||
msg: msg,
|
|
||||||
Expression: lexer.expression,
|
|
||||||
Offset: lexer.currentPos - 1,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Checks for a two char token, otherwise matches a single character
|
|
||||||
// token. This is used whenever a two char token overlaps a single
|
|
||||||
// char token, e.g. "||" -> tPipe, "|" -> tOr.
|
|
||||||
func (lexer *Lexer) matchOrElse(first rune, second rune, matchedType tokType, singleCharType tokType) token {
|
|
||||||
start := lexer.currentPos - lexer.lastWidth
|
|
||||||
nextRune := lexer.next()
|
|
||||||
var t token
|
|
||||||
if nextRune == second {
|
|
||||||
t = token{
|
|
||||||
tokenType: matchedType,
|
|
||||||
value: string(first) + string(second),
|
|
||||||
position: start,
|
|
||||||
length: 2,
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
lexer.back()
|
|
||||||
t = token{
|
|
||||||
tokenType: singleCharType,
|
|
||||||
value: string(first),
|
|
||||||
position: start,
|
|
||||||
length: 1,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) consumeLBracket() token {
|
|
||||||
// There's three options here:
|
|
||||||
// 1. A filter expression "[?"
|
|
||||||
// 2. A flatten operator "[]"
|
|
||||||
// 3. A bare rbracket "["
|
|
||||||
start := lexer.currentPos - lexer.lastWidth
|
|
||||||
nextRune := lexer.next()
|
|
||||||
var t token
|
|
||||||
if nextRune == '?' {
|
|
||||||
t = token{
|
|
||||||
tokenType: tFilter,
|
|
||||||
value: "[?",
|
|
||||||
position: start,
|
|
||||||
length: 2,
|
|
||||||
}
|
|
||||||
} else if nextRune == ']' {
|
|
||||||
t = token{
|
|
||||||
tokenType: tFlatten,
|
|
||||||
value: "[]",
|
|
||||||
position: start,
|
|
||||||
length: 2,
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
t = token{
|
|
||||||
tokenType: tLbracket,
|
|
||||||
value: "[",
|
|
||||||
position: start,
|
|
||||||
length: 1,
|
|
||||||
}
|
|
||||||
lexer.back()
|
|
||||||
}
|
|
||||||
return t
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) consumeQuotedIdentifier() (token, error) {
|
|
||||||
start := lexer.currentPos
|
|
||||||
value, err := lexer.consumeUntil('"')
|
|
||||||
if err != nil {
|
|
||||||
return token{}, err
|
|
||||||
}
|
|
||||||
var decoded string
|
|
||||||
asJSON := []byte("\"" + value + "\"")
|
|
||||||
if err := json.Unmarshal([]byte(asJSON), &decoded); err != nil {
|
|
||||||
return token{}, err
|
|
||||||
}
|
|
||||||
return token{
|
|
||||||
tokenType: tQuotedIdentifier,
|
|
||||||
value: decoded,
|
|
||||||
position: start - 1,
|
|
||||||
length: len(decoded),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) consumeUnquotedIdentifier() token {
|
|
||||||
// Consume runes until we reach the end of an unquoted
|
|
||||||
// identifier.
|
|
||||||
start := lexer.currentPos - lexer.lastWidth
|
|
||||||
for {
|
|
||||||
r := lexer.next()
|
|
||||||
if r < 0 || r > 128 || identifierTrailingBits[uint64(r)/64]&(1<<(uint64(r)%64)) == 0 {
|
|
||||||
lexer.back()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
value := lexer.expression[start:lexer.currentPos]
|
|
||||||
return token{
|
|
||||||
tokenType: tUnquotedIdentifier,
|
|
||||||
value: value,
|
|
||||||
position: start,
|
|
||||||
length: lexer.currentPos - start,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (lexer *Lexer) consumeNumber() token {
|
|
||||||
// Consume runes until we reach something that's not a number.
|
|
||||||
start := lexer.currentPos - lexer.lastWidth
|
|
||||||
for {
|
|
||||||
r := lexer.next()
|
|
||||||
if r < '0' || r > '9' {
|
|
||||||
lexer.back()
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
value := lexer.expression[start:lexer.currentPos]
|
|
||||||
return token{
|
|
||||||
tokenType: tNumber,
|
|
||||||
value: value,
|
|
||||||
position: start,
|
|
||||||
length: lexer.currentPos - start,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
603
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go
generated
vendored
603
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/parser.go
generated
vendored
@@ -1,603 +0,0 @@
|
|||||||
package jmespath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type astNodeType int
|
|
||||||
|
|
||||||
//go:generate stringer -type astNodeType
|
|
||||||
const (
|
|
||||||
ASTEmpty astNodeType = iota
|
|
||||||
ASTComparator
|
|
||||||
ASTCurrentNode
|
|
||||||
ASTExpRef
|
|
||||||
ASTFunctionExpression
|
|
||||||
ASTField
|
|
||||||
ASTFilterProjection
|
|
||||||
ASTFlatten
|
|
||||||
ASTIdentity
|
|
||||||
ASTIndex
|
|
||||||
ASTIndexExpression
|
|
||||||
ASTKeyValPair
|
|
||||||
ASTLiteral
|
|
||||||
ASTMultiSelectHash
|
|
||||||
ASTMultiSelectList
|
|
||||||
ASTOrExpression
|
|
||||||
ASTAndExpression
|
|
||||||
ASTNotExpression
|
|
||||||
ASTPipe
|
|
||||||
ASTProjection
|
|
||||||
ASTSubexpression
|
|
||||||
ASTSlice
|
|
||||||
ASTValueProjection
|
|
||||||
)
|
|
||||||
|
|
||||||
// ASTNode represents the abstract syntax tree of a JMESPath expression.
|
|
||||||
type ASTNode struct {
|
|
||||||
nodeType astNodeType
|
|
||||||
value interface{}
|
|
||||||
children []ASTNode
|
|
||||||
}
|
|
||||||
|
|
||||||
func (node ASTNode) String() string {
|
|
||||||
return node.PrettyPrint(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrettyPrint will pretty print the parsed AST.
|
|
||||||
// The AST is an implementation detail and this pretty print
|
|
||||||
// function is provided as a convenience method to help with
|
|
||||||
// debugging. You should not rely on its output as the internal
|
|
||||||
// structure of the AST may change at any time.
|
|
||||||
func (node ASTNode) PrettyPrint(indent int) string {
|
|
||||||
spaces := strings.Repeat(" ", indent)
|
|
||||||
output := fmt.Sprintf("%s%s {\n", spaces, node.nodeType)
|
|
||||||
nextIndent := indent + 2
|
|
||||||
if node.value != nil {
|
|
||||||
if converted, ok := node.value.(fmt.Stringer); ok {
|
|
||||||
// Account for things like comparator nodes
|
|
||||||
// that are enums with a String() method.
|
|
||||||
output += fmt.Sprintf("%svalue: %s\n", strings.Repeat(" ", nextIndent), converted.String())
|
|
||||||
} else {
|
|
||||||
output += fmt.Sprintf("%svalue: %#v\n", strings.Repeat(" ", nextIndent), node.value)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
lastIndex := len(node.children)
|
|
||||||
if lastIndex > 0 {
|
|
||||||
output += fmt.Sprintf("%schildren: {\n", strings.Repeat(" ", nextIndent))
|
|
||||||
childIndent := nextIndent + 2
|
|
||||||
for _, elem := range node.children {
|
|
||||||
output += elem.PrettyPrint(childIndent)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
output += fmt.Sprintf("%s}\n", spaces)
|
|
||||||
return output
|
|
||||||
}
|
|
||||||
|
|
||||||
var bindingPowers = map[tokType]int{
|
|
||||||
tEOF: 0,
|
|
||||||
tUnquotedIdentifier: 0,
|
|
||||||
tQuotedIdentifier: 0,
|
|
||||||
tRbracket: 0,
|
|
||||||
tRparen: 0,
|
|
||||||
tComma: 0,
|
|
||||||
tRbrace: 0,
|
|
||||||
tNumber: 0,
|
|
||||||
tCurrent: 0,
|
|
||||||
tExpref: 0,
|
|
||||||
tColon: 0,
|
|
||||||
tPipe: 1,
|
|
||||||
tOr: 2,
|
|
||||||
tAnd: 3,
|
|
||||||
tEQ: 5,
|
|
||||||
tLT: 5,
|
|
||||||
tLTE: 5,
|
|
||||||
tGT: 5,
|
|
||||||
tGTE: 5,
|
|
||||||
tNE: 5,
|
|
||||||
tFlatten: 9,
|
|
||||||
tStar: 20,
|
|
||||||
tFilter: 21,
|
|
||||||
tDot: 40,
|
|
||||||
tNot: 45,
|
|
||||||
tLbrace: 50,
|
|
||||||
tLbracket: 55,
|
|
||||||
tLparen: 60,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parser holds state about the current expression being parsed.
|
|
||||||
type Parser struct {
|
|
||||||
expression string
|
|
||||||
tokens []token
|
|
||||||
index int
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewParser creates a new JMESPath parser.
|
|
||||||
func NewParser() *Parser {
|
|
||||||
p := Parser{}
|
|
||||||
return &p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse will compile a JMESPath expression.
|
|
||||||
func (p *Parser) Parse(expression string) (ASTNode, error) {
|
|
||||||
lexer := NewLexer()
|
|
||||||
p.expression = expression
|
|
||||||
p.index = 0
|
|
||||||
tokens, err := lexer.tokenize(expression)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
p.tokens = tokens
|
|
||||||
parsed, err := p.parseExpression(0)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
if p.current() != tEOF {
|
|
||||||
return ASTNode{}, p.syntaxError(fmt.Sprintf(
|
|
||||||
"Unexpected token at the end of the expresssion: %s", p.current()))
|
|
||||||
}
|
|
||||||
return parsed, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) parseExpression(bindingPower int) (ASTNode, error) {
|
|
||||||
var err error
|
|
||||||
leftToken := p.lookaheadToken(0)
|
|
||||||
p.advance()
|
|
||||||
leftNode, err := p.nud(leftToken)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
currentToken := p.current()
|
|
||||||
for bindingPower < bindingPowers[currentToken] {
|
|
||||||
p.advance()
|
|
||||||
leftNode, err = p.led(currentToken, leftNode)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
currentToken = p.current()
|
|
||||||
}
|
|
||||||
return leftNode, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) parseIndexExpression() (ASTNode, error) {
|
|
||||||
if p.lookahead(0) == tColon || p.lookahead(1) == tColon {
|
|
||||||
return p.parseSliceExpression()
|
|
||||||
}
|
|
||||||
indexStr := p.lookaheadToken(0).value
|
|
||||||
parsedInt, err := strconv.Atoi(indexStr)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
indexNode := ASTNode{nodeType: ASTIndex, value: parsedInt}
|
|
||||||
p.advance()
|
|
||||||
if err := p.match(tRbracket); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return indexNode, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) parseSliceExpression() (ASTNode, error) {
|
|
||||||
parts := []*int{nil, nil, nil}
|
|
||||||
index := 0
|
|
||||||
current := p.current()
|
|
||||||
for current != tRbracket && index < 3 {
|
|
||||||
if current == tColon {
|
|
||||||
index++
|
|
||||||
p.advance()
|
|
||||||
} else if current == tNumber {
|
|
||||||
parsedInt, err := strconv.Atoi(p.lookaheadToken(0).value)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
parts[index] = &parsedInt
|
|
||||||
p.advance()
|
|
||||||
} else {
|
|
||||||
return ASTNode{}, p.syntaxError(
|
|
||||||
"Expected tColon or tNumber" + ", received: " + p.current().String())
|
|
||||||
}
|
|
||||||
current = p.current()
|
|
||||||
}
|
|
||||||
if err := p.match(tRbracket); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTSlice,
|
|
||||||
value: parts,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) match(tokenType tokType) error {
|
|
||||||
if p.current() == tokenType {
|
|
||||||
p.advance()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return p.syntaxError("Expected " + tokenType.String() + ", received: " + p.current().String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) led(tokenType tokType, node ASTNode) (ASTNode, error) {
|
|
||||||
switch tokenType {
|
|
||||||
case tDot:
|
|
||||||
if p.current() != tStar {
|
|
||||||
right, err := p.parseDotRHS(bindingPowers[tDot])
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTSubexpression,
|
|
||||||
children: []ASTNode{node, right},
|
|
||||||
}, err
|
|
||||||
}
|
|
||||||
p.advance()
|
|
||||||
right, err := p.parseProjectionRHS(bindingPowers[tDot])
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTValueProjection,
|
|
||||||
children: []ASTNode{node, right},
|
|
||||||
}, err
|
|
||||||
case tPipe:
|
|
||||||
right, err := p.parseExpression(bindingPowers[tPipe])
|
|
||||||
return ASTNode{nodeType: ASTPipe, children: []ASTNode{node, right}}, err
|
|
||||||
case tOr:
|
|
||||||
right, err := p.parseExpression(bindingPowers[tOr])
|
|
||||||
return ASTNode{nodeType: ASTOrExpression, children: []ASTNode{node, right}}, err
|
|
||||||
case tAnd:
|
|
||||||
right, err := p.parseExpression(bindingPowers[tAnd])
|
|
||||||
return ASTNode{nodeType: ASTAndExpression, children: []ASTNode{node, right}}, err
|
|
||||||
case tLparen:
|
|
||||||
name := node.value
|
|
||||||
var args []ASTNode
|
|
||||||
for p.current() != tRparen {
|
|
||||||
expression, err := p.parseExpression(0)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
if p.current() == tComma {
|
|
||||||
if err := p.match(tComma); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
args = append(args, expression)
|
|
||||||
}
|
|
||||||
if err := p.match(tRparen); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTFunctionExpression,
|
|
||||||
value: name,
|
|
||||||
children: args,
|
|
||||||
}, nil
|
|
||||||
case tFilter:
|
|
||||||
return p.parseFilter(node)
|
|
||||||
case tFlatten:
|
|
||||||
left := ASTNode{nodeType: ASTFlatten, children: []ASTNode{node}}
|
|
||||||
right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTProjection,
|
|
||||||
children: []ASTNode{left, right},
|
|
||||||
}, err
|
|
||||||
case tEQ, tNE, tGT, tGTE, tLT, tLTE:
|
|
||||||
right, err := p.parseExpression(bindingPowers[tokenType])
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTComparator,
|
|
||||||
value: tokenType,
|
|
||||||
children: []ASTNode{node, right},
|
|
||||||
}, nil
|
|
||||||
case tLbracket:
|
|
||||||
tokenType := p.current()
|
|
||||||
var right ASTNode
|
|
||||||
var err error
|
|
||||||
if tokenType == tNumber || tokenType == tColon {
|
|
||||||
right, err = p.parseIndexExpression()
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return p.projectIfSlice(node, right)
|
|
||||||
}
|
|
||||||
// Otherwise this is a projection.
|
|
||||||
if err := p.match(tStar); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
if err := p.match(tRbracket); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
right, err = p.parseProjectionRHS(bindingPowers[tStar])
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTProjection,
|
|
||||||
children: []ASTNode{node, right},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
return ASTNode{}, p.syntaxError("Unexpected token: " + tokenType.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) nud(token token) (ASTNode, error) {
|
|
||||||
switch token.tokenType {
|
|
||||||
case tJSONLiteral:
|
|
||||||
var parsed interface{}
|
|
||||||
err := json.Unmarshal([]byte(token.value), &parsed)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{nodeType: ASTLiteral, value: parsed}, nil
|
|
||||||
case tStringLiteral:
|
|
||||||
return ASTNode{nodeType: ASTLiteral, value: token.value}, nil
|
|
||||||
case tUnquotedIdentifier:
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTField,
|
|
||||||
value: token.value,
|
|
||||||
}, nil
|
|
||||||
case tQuotedIdentifier:
|
|
||||||
node := ASTNode{nodeType: ASTField, value: token.value}
|
|
||||||
if p.current() == tLparen {
|
|
||||||
return ASTNode{}, p.syntaxErrorToken("Can't have quoted identifier as function name.", token)
|
|
||||||
}
|
|
||||||
return node, nil
|
|
||||||
case tStar:
|
|
||||||
left := ASTNode{nodeType: ASTIdentity}
|
|
||||||
var right ASTNode
|
|
||||||
var err error
|
|
||||||
if p.current() == tRbracket {
|
|
||||||
right = ASTNode{nodeType: ASTIdentity}
|
|
||||||
} else {
|
|
||||||
right, err = p.parseProjectionRHS(bindingPowers[tStar])
|
|
||||||
}
|
|
||||||
return ASTNode{nodeType: ASTValueProjection, children: []ASTNode{left, right}}, err
|
|
||||||
case tFilter:
|
|
||||||
return p.parseFilter(ASTNode{nodeType: ASTIdentity})
|
|
||||||
case tLbrace:
|
|
||||||
return p.parseMultiSelectHash()
|
|
||||||
case tFlatten:
|
|
||||||
left := ASTNode{
|
|
||||||
nodeType: ASTFlatten,
|
|
||||||
children: []ASTNode{ASTNode{nodeType: ASTIdentity}},
|
|
||||||
}
|
|
||||||
right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{nodeType: ASTProjection, children: []ASTNode{left, right}}, nil
|
|
||||||
case tLbracket:
|
|
||||||
tokenType := p.current()
|
|
||||||
//var right ASTNode
|
|
||||||
if tokenType == tNumber || tokenType == tColon {
|
|
||||||
right, err := p.parseIndexExpression()
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, nil
|
|
||||||
}
|
|
||||||
return p.projectIfSlice(ASTNode{nodeType: ASTIdentity}, right)
|
|
||||||
} else if tokenType == tStar && p.lookahead(1) == tRbracket {
|
|
||||||
p.advance()
|
|
||||||
p.advance()
|
|
||||||
right, err := p.parseProjectionRHS(bindingPowers[tStar])
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTProjection,
|
|
||||||
children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right},
|
|
||||||
}, nil
|
|
||||||
} else {
|
|
||||||
return p.parseMultiSelectList()
|
|
||||||
}
|
|
||||||
case tCurrent:
|
|
||||||
return ASTNode{nodeType: ASTCurrentNode}, nil
|
|
||||||
case tExpref:
|
|
||||||
expression, err := p.parseExpression(bindingPowers[tExpref])
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{nodeType: ASTExpRef, children: []ASTNode{expression}}, nil
|
|
||||||
case tNot:
|
|
||||||
expression, err := p.parseExpression(bindingPowers[tNot])
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{nodeType: ASTNotExpression, children: []ASTNode{expression}}, nil
|
|
||||||
case tLparen:
|
|
||||||
expression, err := p.parseExpression(0)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
if err := p.match(tRparen); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return expression, nil
|
|
||||||
case tEOF:
|
|
||||||
return ASTNode{}, p.syntaxErrorToken("Incomplete expression", token)
|
|
||||||
}
|
|
||||||
|
|
||||||
return ASTNode{}, p.syntaxErrorToken("Invalid token: "+token.tokenType.String(), token)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) parseMultiSelectList() (ASTNode, error) {
|
|
||||||
var expressions []ASTNode
|
|
||||||
for {
|
|
||||||
expression, err := p.parseExpression(0)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
expressions = append(expressions, expression)
|
|
||||||
if p.current() == tRbracket {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
err = p.match(tComma)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err := p.match(tRbracket)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTMultiSelectList,
|
|
||||||
children: expressions,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) parseMultiSelectHash() (ASTNode, error) {
|
|
||||||
var children []ASTNode
|
|
||||||
for {
|
|
||||||
keyToken := p.lookaheadToken(0)
|
|
||||||
if err := p.match(tUnquotedIdentifier); err != nil {
|
|
||||||
if err := p.match(tQuotedIdentifier); err != nil {
|
|
||||||
return ASTNode{}, p.syntaxError("Expected tQuotedIdentifier or tUnquotedIdentifier")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
keyName := keyToken.value
|
|
||||||
err := p.match(tColon)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
value, err := p.parseExpression(0)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
node := ASTNode{
|
|
||||||
nodeType: ASTKeyValPair,
|
|
||||||
value: keyName,
|
|
||||||
children: []ASTNode{value},
|
|
||||||
}
|
|
||||||
children = append(children, node)
|
|
||||||
if p.current() == tComma {
|
|
||||||
err := p.match(tComma)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, nil
|
|
||||||
}
|
|
||||||
} else if p.current() == tRbrace {
|
|
||||||
err := p.match(tRbrace)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, nil
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTMultiSelectHash,
|
|
||||||
children: children,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) projectIfSlice(left ASTNode, right ASTNode) (ASTNode, error) {
|
|
||||||
indexExpr := ASTNode{
|
|
||||||
nodeType: ASTIndexExpression,
|
|
||||||
children: []ASTNode{left, right},
|
|
||||||
}
|
|
||||||
if right.nodeType == ASTSlice {
|
|
||||||
right, err := p.parseProjectionRHS(bindingPowers[tStar])
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTProjection,
|
|
||||||
children: []ASTNode{indexExpr, right},
|
|
||||||
}, err
|
|
||||||
}
|
|
||||||
return indexExpr, nil
|
|
||||||
}
|
|
||||||
func (p *Parser) parseFilter(node ASTNode) (ASTNode, error) {
|
|
||||||
var right, condition ASTNode
|
|
||||||
var err error
|
|
||||||
condition, err = p.parseExpression(0)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
if err := p.match(tRbracket); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
if p.current() == tFlatten {
|
|
||||||
right = ASTNode{nodeType: ASTIdentity}
|
|
||||||
} else {
|
|
||||||
right, err = p.parseProjectionRHS(bindingPowers[tFilter])
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return ASTNode{
|
|
||||||
nodeType: ASTFilterProjection,
|
|
||||||
children: []ASTNode{node, right, condition},
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) parseDotRHS(bindingPower int) (ASTNode, error) {
|
|
||||||
lookahead := p.current()
|
|
||||||
if tokensOneOf([]tokType{tQuotedIdentifier, tUnquotedIdentifier, tStar}, lookahead) {
|
|
||||||
return p.parseExpression(bindingPower)
|
|
||||||
} else if lookahead == tLbracket {
|
|
||||||
if err := p.match(tLbracket); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return p.parseMultiSelectList()
|
|
||||||
} else if lookahead == tLbrace {
|
|
||||||
if err := p.match(tLbrace); err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return p.parseMultiSelectHash()
|
|
||||||
}
|
|
||||||
return ASTNode{}, p.syntaxError("Expected identifier, lbracket, or lbrace")
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) parseProjectionRHS(bindingPower int) (ASTNode, error) {
|
|
||||||
current := p.current()
|
|
||||||
if bindingPowers[current] < 10 {
|
|
||||||
return ASTNode{nodeType: ASTIdentity}, nil
|
|
||||||
} else if current == tLbracket {
|
|
||||||
return p.parseExpression(bindingPower)
|
|
||||||
} else if current == tFilter {
|
|
||||||
return p.parseExpression(bindingPower)
|
|
||||||
} else if current == tDot {
|
|
||||||
err := p.match(tDot)
|
|
||||||
if err != nil {
|
|
||||||
return ASTNode{}, err
|
|
||||||
}
|
|
||||||
return p.parseDotRHS(bindingPower)
|
|
||||||
} else {
|
|
||||||
return ASTNode{}, p.syntaxError("Error")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) lookahead(number int) tokType {
|
|
||||||
return p.lookaheadToken(number).tokenType
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) current() tokType {
|
|
||||||
return p.lookahead(0)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) lookaheadToken(number int) token {
|
|
||||||
return p.tokens[p.index+number]
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) advance() {
|
|
||||||
p.index++
|
|
||||||
}
|
|
||||||
|
|
||||||
func tokensOneOf(elements []tokType, token tokType) bool {
|
|
||||||
for _, elem := range elements {
|
|
||||||
if elem == token {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (p *Parser) syntaxError(msg string) SyntaxError {
|
|
||||||
return SyntaxError{
|
|
||||||
msg: msg,
|
|
||||||
Expression: p.expression,
|
|
||||||
Offset: p.lookaheadToken(0).position,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Create a SyntaxError based on the provided token.
|
|
||||||
// This differs from syntaxError() which creates a SyntaxError
|
|
||||||
// based on the current lookahead token.
|
|
||||||
func (p *Parser) syntaxErrorToken(msg string, t token) SyntaxError {
|
|
||||||
return SyntaxError{
|
|
||||||
msg: msg,
|
|
||||||
Expression: p.expression,
|
|
||||||
Offset: t.position,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
// generated by stringer -type=tokType; DO NOT EDIT
|
|
||||||
|
|
||||||
package jmespath
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
const _tokType_name = "tUnknowntStartDottFiltertFlattentLparentRparentLbrackettRbrackettLbracetRbracetOrtPipetNumbertUnquotedIdentifiertQuotedIdentifiertCommatColontLTtLTEtGTtGTEtEQtNEtJSONLiteraltStringLiteraltCurrenttExpreftAndtNottEOF"
|
|
||||||
|
|
||||||
var _tokType_index = [...]uint8{0, 8, 13, 17, 24, 32, 39, 46, 55, 64, 71, 78, 81, 86, 93, 112, 129, 135, 141, 144, 148, 151, 155, 158, 161, 173, 187, 195, 202, 206, 210, 214}
|
|
||||||
|
|
||||||
func (i tokType) String() string {
|
|
||||||
if i < 0 || i >= tokType(len(_tokType_index)-1) {
|
|
||||||
return fmt.Sprintf("tokType(%d)", i)
|
|
||||||
}
|
|
||||||
return _tokType_name[_tokType_index[i]:_tokType_index[i+1]]
|
|
||||||
}
|
|
||||||
185
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go
generated
vendored
185
vendor/github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath/util.go
generated
vendored
@@ -1,185 +0,0 @@
|
|||||||
package jmespath
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"reflect"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsFalse determines if an object is false based on the JMESPath spec.
|
|
||||||
// JMESPath defines false values to be any of:
|
|
||||||
// - An empty string array, or hash.
|
|
||||||
// - The boolean value false.
|
|
||||||
// - nil
|
|
||||||
func isFalse(value interface{}) bool {
|
|
||||||
switch v := value.(type) {
|
|
||||||
case bool:
|
|
||||||
return !v
|
|
||||||
case []interface{}:
|
|
||||||
return len(v) == 0
|
|
||||||
case map[string]interface{}:
|
|
||||||
return len(v) == 0
|
|
||||||
case string:
|
|
||||||
return len(v) == 0
|
|
||||||
case nil:
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// Try the reflection cases before returning false.
|
|
||||||
rv := reflect.ValueOf(value)
|
|
||||||
switch rv.Kind() {
|
|
||||||
case reflect.Struct:
|
|
||||||
// A struct type will never be false, even if
|
|
||||||
// all of its values are the zero type.
|
|
||||||
return false
|
|
||||||
case reflect.Slice, reflect.Map:
|
|
||||||
return rv.Len() == 0
|
|
||||||
case reflect.Ptr:
|
|
||||||
if rv.IsNil() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
// If it's a pointer type, we'll try to deref the pointer
|
|
||||||
// and evaluate the pointer value for isFalse.
|
|
||||||
element := rv.Elem()
|
|
||||||
return isFalse(element.Interface())
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ObjsEqual is a generic object equality check.
|
|
||||||
// It will take two arbitrary objects and recursively determine
|
|
||||||
// if they are equal.
|
|
||||||
func objsEqual(left interface{}, right interface{}) bool {
|
|
||||||
return reflect.DeepEqual(left, right)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SliceParam refers to a single part of a slice.
|
|
||||||
// A slice consists of a start, a stop, and a step, similar to
|
|
||||||
// python slices.
|
|
||||||
type sliceParam struct {
|
|
||||||
N int
|
|
||||||
Specified bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// Slice supports [start:stop:step] style slicing that's supported in JMESPath.
|
|
||||||
func slice(slice []interface{}, parts []sliceParam) ([]interface{}, error) {
|
|
||||||
computed, err := computeSliceParams(len(slice), parts)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
start, stop, step := computed[0], computed[1], computed[2]
|
|
||||||
result := []interface{}{}
|
|
||||||
if step > 0 {
|
|
||||||
for i := start; i < stop; i += step {
|
|
||||||
result = append(result, slice[i])
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
for i := start; i > stop; i += step {
|
|
||||||
result = append(result, slice[i])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return result, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func computeSliceParams(length int, parts []sliceParam) ([]int, error) {
|
|
||||||
var start, stop, step int
|
|
||||||
if !parts[2].Specified {
|
|
||||||
step = 1
|
|
||||||
} else if parts[2].N == 0 {
|
|
||||||
return nil, errors.New("Invalid slice, step cannot be 0")
|
|
||||||
} else {
|
|
||||||
step = parts[2].N
|
|
||||||
}
|
|
||||||
var stepValueNegative bool
|
|
||||||
if step < 0 {
|
|
||||||
stepValueNegative = true
|
|
||||||
} else {
|
|
||||||
stepValueNegative = false
|
|
||||||
}
|
|
||||||
|
|
||||||
if !parts[0].Specified {
|
|
||||||
if stepValueNegative {
|
|
||||||
start = length - 1
|
|
||||||
} else {
|
|
||||||
start = 0
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
start = capSlice(length, parts[0].N, step)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !parts[1].Specified {
|
|
||||||
if stepValueNegative {
|
|
||||||
stop = -1
|
|
||||||
} else {
|
|
||||||
stop = length
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
stop = capSlice(length, parts[1].N, step)
|
|
||||||
}
|
|
||||||
return []int{start, stop, step}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func capSlice(length int, actual int, step int) int {
|
|
||||||
if actual < 0 {
|
|
||||||
actual += length
|
|
||||||
if actual < 0 {
|
|
||||||
if step < 0 {
|
|
||||||
actual = -1
|
|
||||||
} else {
|
|
||||||
actual = 0
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if actual >= length {
|
|
||||||
if step < 0 {
|
|
||||||
actual = length - 1
|
|
||||||
} else {
|
|
||||||
actual = length
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return actual
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToArrayNum converts an empty interface type to a slice of float64.
|
|
||||||
// If any element in the array cannot be converted, then nil is returned
|
|
||||||
// along with a second value of false.
|
|
||||||
func toArrayNum(data interface{}) ([]float64, bool) {
|
|
||||||
// Is there a better way to do this with reflect?
|
|
||||||
if d, ok := data.([]interface{}); ok {
|
|
||||||
result := make([]float64, len(d))
|
|
||||||
for i, el := range d {
|
|
||||||
item, ok := el.(float64)
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
result[i] = item
|
|
||||||
}
|
|
||||||
return result, true
|
|
||||||
}
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToArrayStr converts an empty interface type to a slice of strings.
|
|
||||||
// If any element in the array cannot be converted, then nil is returned
|
|
||||||
// along with a second value of false. If the input data could be entirely
|
|
||||||
// converted, then the converted data, along with a second value of true,
|
|
||||||
// will be returned.
|
|
||||||
func toArrayStr(data interface{}) ([]string, bool) {
|
|
||||||
// Is there a better way to do this with reflect?
|
|
||||||
if d, ok := data.([]interface{}); ok {
|
|
||||||
result := make([]string, len(d))
|
|
||||||
for i, el := range d {
|
|
||||||
item, ok := el.(string)
|
|
||||||
if !ok {
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
result[i] = item
|
|
||||||
}
|
|
||||||
return result, true
|
|
||||||
}
|
|
||||||
return nil, false
|
|
||||||
}
|
|
||||||
|
|
||||||
func isSliceType(v interface{}) bool {
|
|
||||||
if v == nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return reflect.TypeOf(v).Kind() == reflect.Slice
|
|
||||||
}
|
|
||||||
34
vendor/github.com/emicklei/go-restful/examples/msgpack/msgpack_entity.go
generated
vendored
34
vendor/github.com/emicklei/go-restful/examples/msgpack/msgpack_entity.go
generated
vendored
@@ -1,34 +0,0 @@
|
|||||||
package restPack
|
|
||||||
|
|
||||||
import (
|
|
||||||
restful "github.com/emicklei/go-restful"
|
|
||||||
"gopkg.in/vmihailenco/msgpack.v2"
|
|
||||||
)
|
|
||||||
|
|
||||||
const MIME_MSGPACK = "application/x-msgpack" // Accept or Content-Type used in Consumes() and/or Produces()
|
|
||||||
|
|
||||||
// NewEntityAccessorMPack returns a new EntityReaderWriter for accessing MessagePack content.
|
|
||||||
// This package is not initialized with such an accessor using the MIME_MSGPACK contentType.
|
|
||||||
func NewEntityAccessorMsgPack() restful.EntityReaderWriter {
|
|
||||||
return entityMsgPackAccess{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// entityOctetAccess is a EntityReaderWriter for Octet encoding
|
|
||||||
type entityMsgPackAccess struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read unmarshalls the value from byte slice and using msgpack to unmarshal
|
|
||||||
func (e entityMsgPackAccess) Read(req *restful.Request, v interface{}) error {
|
|
||||||
return msgpack.NewDecoder(req.Request.Body).Decode(v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write marshals the value to byte slice and set the Content-Type Header.
|
|
||||||
func (e entityMsgPackAccess) Write(resp *restful.Response, status int, v interface{}) error {
|
|
||||||
if v == nil {
|
|
||||||
resp.WriteHeader(status)
|
|
||||||
// do not write a nil representation
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
resp.WriteHeader(status)
|
|
||||||
return msgpack.NewEncoder(resp).Encode(v)
|
|
||||||
}
|
|
||||||
2
vendor/github.com/fsouza/go-dockerclient/.gitignore
generated
vendored
2
vendor/github.com/fsouza/go-dockerclient/.gitignore
generated
vendored
@@ -1,2 +0,0 @@
|
|||||||
# temporary symlink for testing
|
|
||||||
testing/data/symlink
|
|
||||||
27
vendor/github.com/fsouza/go-dockerclient/.travis.yml
generated
vendored
27
vendor/github.com/fsouza/go-dockerclient/.travis.yml
generated
vendored
@@ -1,27 +0,0 @@
|
|||||||
language: go
|
|
||||||
sudo: required
|
|
||||||
go:
|
|
||||||
- 1.4.2
|
|
||||||
- 1.5.3
|
|
||||||
- 1.6
|
|
||||||
- tip
|
|
||||||
os:
|
|
||||||
- linux
|
|
||||||
- osx
|
|
||||||
env:
|
|
||||||
- GOARCH=amd64 DOCKER_VERSION=1.8.3
|
|
||||||
- GOARCH=386 DOCKER_VERSION=1.8.3
|
|
||||||
- GOARCH=amd64 DOCKER_VERSION=1.9.1
|
|
||||||
- GOARCH=386 DOCKER_VERSION=1.9.1
|
|
||||||
- GOARCH=amd64 DOCKER_VERSION=1.10.3
|
|
||||||
- GOARCH=386 DOCKER_VERSION=1.10.3
|
|
||||||
install:
|
|
||||||
- travis_retry travis-scripts/install.bash
|
|
||||||
script:
|
|
||||||
- travis-scripts/run-tests.bash
|
|
||||||
services:
|
|
||||||
- docker
|
|
||||||
matrix:
|
|
||||||
fast_finish: true
|
|
||||||
allow_failures:
|
|
||||||
- go: tip
|
|
||||||
132
vendor/github.com/fsouza/go-dockerclient/AUTHORS
generated
vendored
132
vendor/github.com/fsouza/go-dockerclient/AUTHORS
generated
vendored
@@ -1,132 +0,0 @@
|
|||||||
# This is the official list of go-dockerclient authors for copyright purposes.
|
|
||||||
|
|
||||||
Abhishek Chanda <abhishek.becs@gmail.com>
|
|
||||||
Adam Bell-Hanssen <adamb@aller.no>
|
|
||||||
Adrien Kohlbecker <adrien.kohlbecker@gmail.com>
|
|
||||||
Aldrin Leal <aldrin@leal.eng.br>
|
|
||||||
Andreas Jaekle <andreas@jaekle.net>
|
|
||||||
Andrews Medina <andrewsmedina@gmail.com>
|
|
||||||
Andrey Sibiryov <kobolog@uber.com>
|
|
||||||
Andy Goldstein <andy.goldstein@redhat.com>
|
|
||||||
Antonio Murdaca <runcom@redhat.com>
|
|
||||||
Artem Sidorenko <artem@2realities.com>
|
|
||||||
Ben Marini <ben@remind101.com>
|
|
||||||
Ben McCann <benmccann.com>
|
|
||||||
Ben Parees <bparees@redhat.com>
|
|
||||||
Benno van den Berg <bennovandenberg@gmail.com>
|
|
||||||
Bradley Cicenas <bradley.cicenas@gmail.com>
|
|
||||||
Brendan Fosberry <brendan@codeship.com>
|
|
||||||
Brian Lalor <blalor@bravo5.org>
|
|
||||||
Brian P. Hamachek <brian@brianhama.com>
|
|
||||||
Brian Palmer <brianp@instructure.com>
|
|
||||||
Bryan Boreham <bjboreham@gmail.com>
|
|
||||||
Burke Libbey <burke@libbey.me>
|
|
||||||
Carlos Diaz-Padron <cpadron@mozilla.com>
|
|
||||||
Cesar Wong <cewong@redhat.com>
|
|
||||||
Cezar Sa Espinola <cezar.sa@corp.globo.com>
|
|
||||||
Cheah Chu Yeow <chuyeow@gmail.com>
|
|
||||||
cheneydeng <cheneydeng@qq.com>
|
|
||||||
Chris Bednarski <banzaimonkey@gmail.com>
|
|
||||||
CMGS <ilskdw@gmail.com>
|
|
||||||
Colin Hebert <hebert.colin@gmail.com>
|
|
||||||
Craig Jellick <craig@rancher.com>
|
|
||||||
Dan Williams <dcbw@redhat.com>
|
|
||||||
Daniel, Dao Quang Minh <dqminh89@gmail.com>
|
|
||||||
Daniel Garcia <daniel@danielgarcia.info>
|
|
||||||
Daniel Hiltgen <daniel.hiltgen@docker.com>
|
|
||||||
Darren Shepherd <darren@rancher.com>
|
|
||||||
Dave Choi <dave.choi@daumkakao.com>
|
|
||||||
David Huie <dahuie@gmail.com>
|
|
||||||
Dawn Chen <dawnchen@google.com>
|
|
||||||
Dinesh Subhraveti <dinesh@gemini-systems.net>
|
|
||||||
Drew Wells <drew.wells00@gmail.com>
|
|
||||||
Ed <edrocksit@gmail.com>
|
|
||||||
Elias G. Schneevoigt <eliasgs@gmail.com>
|
|
||||||
Erez Horev <erez.horev@elastifile.com>
|
|
||||||
Eric Anderson <anderson@copperegg.com>
|
|
||||||
Ewout Prangsma <ewout@prangsma.net>
|
|
||||||
Fabio Rehm <fgrehm@gmail.com>
|
|
||||||
Fatih Arslan <ftharsln@gmail.com>
|
|
||||||
Flavia Missi <flaviamissi@gmail.com>
|
|
||||||
Francisco Souza <f@souza.cc>
|
|
||||||
Frank Groeneveld <frank@frankgroeneveld.nl>
|
|
||||||
George Moura <gwmoura@gmail.com>
|
|
||||||
Grégoire Delattre <gregoire.delattre@gmail.com>
|
|
||||||
Guillermo Álvarez Fernández <guillermo@cientifico.net>
|
|
||||||
Harry Zhang <harryzhang@zju.edu.cn>
|
|
||||||
He Simei <hesimei@zju.edu.cn>
|
|
||||||
Ivan Mikushin <i.mikushin@gmail.com>
|
|
||||||
James Bardin <jbardin@litl.com>
|
|
||||||
James Nugent <james@jen20.com>
|
|
||||||
Jari Kolehmainen <jari.kolehmainen@digia.com>
|
|
||||||
Jason Wilder <jwilder@litl.com>
|
|
||||||
Jawher Moussa <jawher.moussa@gmail.com>
|
|
||||||
Jean-Baptiste Dalido <jeanbaptiste@appgratis.com>
|
|
||||||
Jeff Mitchell <jeffrey.mitchell@gmail.com>
|
|
||||||
Jeffrey Hulten <jhulten@gmail.com>
|
|
||||||
Jen Andre <jandre@gmail.com>
|
|
||||||
Jérôme Laurens <jeromelaurens@gmail.com>
|
|
||||||
Johan Euphrosine <proppy@google.com>
|
|
||||||
John Hughes <hughesj@visa.com>
|
|
||||||
Kamil Domanski <kamil@domanski.co>
|
|
||||||
Karan Misra <kidoman@gmail.com>
|
|
||||||
Ken Herner <chosenken@gmail.com>
|
|
||||||
Kim, Hirokuni <hirokuni.kim@kvh.co.jp>
|
|
||||||
Kyle Allan <kallan357@gmail.com>
|
|
||||||
Liron Levin <levinlir@gmail.com>
|
|
||||||
Lior Yankovich <lior@twistlock.com>
|
|
||||||
Liu Peng <vslene@gmail.com>
|
|
||||||
Lorenz Leutgeb <lorenz.leutgeb@gmail.com>
|
|
||||||
Lucas Clemente <lucas@clemente.io>
|
|
||||||
Lucas Weiblen <lucasweiblen@gmail.com>
|
|
||||||
Lyon Hill <lyondhill@gmail.com>
|
|
||||||
Mantas Matelis <mmatelis@coursera.org>
|
|
||||||
Martin Sweeney <martin@sweeney.io>
|
|
||||||
Máximo Cuadros Ortiz <mcuadros@gmail.com>
|
|
||||||
Michael Schmatz <michaelschmatz@gmail.com>
|
|
||||||
Michal Fojtik <mfojtik@redhat.com>
|
|
||||||
Mike Dillon <mike.dillon@synctree.com>
|
|
||||||
Mrunal Patel <mrunalp@gmail.com>
|
|
||||||
Nate Jones <nate@endot.org>
|
|
||||||
Nguyen Sy Thanh Son <sonnst@sigma-solutions.eu>
|
|
||||||
Nicholas Van Wiggeren <nvanwiggeren@digitalocean.com>
|
|
||||||
Nick Ethier <ncethier@gmail.com>
|
|
||||||
Omeid Matten <public@omeid.me>
|
|
||||||
Orivej Desh <orivej@gmx.fr>
|
|
||||||
Paul Bellamy <paul.a.bellamy@gmail.com>
|
|
||||||
Paul Morie <pmorie@gmail.com>
|
|
||||||
Paul Weil <pweil@redhat.com>
|
|
||||||
Peter Edge <peter.edge@gmail.com>
|
|
||||||
Peter Jihoon Kim <raingrove@gmail.com>
|
|
||||||
Phil Lu <lu@stackengine.com>
|
|
||||||
Philippe Lafoucrière <philippe.lafoucriere@tech-angels.com>
|
|
||||||
Rafe Colton <rafael.colton@gmail.com>
|
|
||||||
Rob Miller <rob@kalistra.com>
|
|
||||||
Robert Williamson <williamson.robert@gmail.com>
|
|
||||||
Roman Khlystik <roman.khlystik@gmail.com>
|
|
||||||
Salvador Gironès <salvadorgirones@gmail.com>
|
|
||||||
Sam Rijs <srijs@airpost.net>
|
|
||||||
Sami Wagiaalla <swagiaal@redhat.com>
|
|
||||||
Samuel Archambault <sarchambault@lapresse.ca>
|
|
||||||
Samuel Karp <skarp@amazon.com>
|
|
||||||
Silas Sewell <silas@sewell.org>
|
|
||||||
Simon Eskildsen <sirup@sirupsen.com>
|
|
||||||
Simon Menke <simon.menke@gmail.com>
|
|
||||||
Skolos <skolos@gopherlab.com>
|
|
||||||
Soulou <leo@unbekandt.eu>
|
|
||||||
Sridhar Ratnakumar <sridharr@activestate.com>
|
|
||||||
Summer Mousa <smousa@zenoss.com>
|
|
||||||
Sunjin Lee <styner32@gmail.com>
|
|
||||||
Tarsis Azevedo <tarsis@corp.globo.com>
|
|
||||||
Tim Schindler <tim@catalyst-zero.com>
|
|
||||||
Timothy St. Clair <tstclair@redhat.com>
|
|
||||||
Tobi Knaup <tobi@mesosphere.io>
|
|
||||||
Tom Wilkie <tom.wilkie@gmail.com>
|
|
||||||
Tonic <tonicbupt@gmail.com>
|
|
||||||
ttyh061 <ttyh061@gmail.com>
|
|
||||||
Victor Marmol <vmarmol@google.com>
|
|
||||||
Vincenzo Prignano <vincenzo.prignano@gmail.com>
|
|
||||||
Wiliam Souza <wiliamsouza83@gmail.com>
|
|
||||||
Ye Yin <eyniy@qq.com>
|
|
||||||
Yu, Zou <zouyu7@huawei.com>
|
|
||||||
Yuriy Bogdanov <chinsay@gmail.com>
|
|
||||||
6
vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
generated
vendored
6
vendor/github.com/fsouza/go-dockerclient/DOCKER-LICENSE
generated
vendored
@@ -1,6 +0,0 @@
|
|||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
You can find the Docker license at the following link:
|
|
||||||
https://raw.githubusercontent.com/docker/docker/master/LICENSE
|
|
||||||
22
vendor/github.com/fsouza/go-dockerclient/LICENSE
generated
vendored
22
vendor/github.com/fsouza/go-dockerclient/LICENSE
generated
vendored
@@ -1,22 +0,0 @@
|
|||||||
Copyright (c) 2016, go-dockerclient authors
|
|
||||||
All rights reserved.
|
|
||||||
|
|
||||||
Redistribution and use in source and binary forms, with or without
|
|
||||||
modification, are permitted provided that the following conditions are met:
|
|
||||||
|
|
||||||
* Redistributions of source code must retain the above copyright notice,
|
|
||||||
this list of conditions and the following disclaimer.
|
|
||||||
* Redistributions in binary form must reproduce the above copyright notice,
|
|
||||||
this list of conditions and the following disclaimer in the documentation
|
|
||||||
and/or other materials provided with the distribution.
|
|
||||||
|
|
||||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
|
||||||
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
|
||||||
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
|
||||||
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
|
||||||
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
|
||||||
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
|
||||||
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
|
||||||
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
|
||||||
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
|
||||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
|
||||||
57
vendor/github.com/fsouza/go-dockerclient/Makefile
generated
vendored
57
vendor/github.com/fsouza/go-dockerclient/Makefile
generated
vendored
@@ -1,57 +0,0 @@
|
|||||||
.PHONY: \
|
|
||||||
all \
|
|
||||||
vendor \
|
|
||||||
lint \
|
|
||||||
vet \
|
|
||||||
fmt \
|
|
||||||
fmtcheck \
|
|
||||||
pretest \
|
|
||||||
test \
|
|
||||||
integration \
|
|
||||||
cov \
|
|
||||||
clean
|
|
||||||
|
|
||||||
PKGS = . ./testing
|
|
||||||
|
|
||||||
all: test
|
|
||||||
|
|
||||||
vendor:
|
|
||||||
@ go get -v github.com/mjibson/party
|
|
||||||
party -d external -c -u
|
|
||||||
|
|
||||||
lint:
|
|
||||||
@ go get -v github.com/golang/lint/golint
|
|
||||||
@for file in $$(git ls-files '*.go' | grep -v 'external/'); do \
|
|
||||||
export output="$$(golint $${file} | grep -v 'type name will be used as docker.DockerInfo')"; \
|
|
||||||
[ -n "$${output}" ] && echo "$${output}" && export status=1; \
|
|
||||||
done; \
|
|
||||||
exit $${status:-0}
|
|
||||||
|
|
||||||
vet:
|
|
||||||
$(foreach pkg,$(PKGS),go vet $(pkg);)
|
|
||||||
|
|
||||||
fmt:
|
|
||||||
gofmt -s -w $(PKGS)
|
|
||||||
|
|
||||||
fmtcheck:
|
|
||||||
@ export output=$$(gofmt -s -d $(PKGS)); \
|
|
||||||
[ -n "$${output}" ] && echo "$${output}" && export status=1; \
|
|
||||||
exit $${status:-0}
|
|
||||||
|
|
||||||
pretest: lint vet fmtcheck
|
|
||||||
|
|
||||||
gotest:
|
|
||||||
$(foreach pkg,$(PKGS),go test $(pkg) || exit;)
|
|
||||||
|
|
||||||
test: pretest gotest
|
|
||||||
|
|
||||||
integration:
|
|
||||||
go test -tags docker_integration -run TestIntegration -v
|
|
||||||
|
|
||||||
cov:
|
|
||||||
@ go get -v github.com/axw/gocov/gocov
|
|
||||||
@ go get golang.org/x/tools/cmd/cover
|
|
||||||
gocov test | gocov report
|
|
||||||
|
|
||||||
clean:
|
|
||||||
$(foreach pkg,$(PKGS),go clean $(pkg) || exit;)
|
|
||||||
105
vendor/github.com/fsouza/go-dockerclient/README.markdown
generated
vendored
105
vendor/github.com/fsouza/go-dockerclient/README.markdown
generated
vendored
@@ -1,105 +0,0 @@
|
|||||||
# go-dockerclient
|
|
||||||
|
|
||||||
[](https://travis-ci.org/fsouza/go-dockerclient)
|
|
||||||
[](https://godoc.org/github.com/fsouza/go-dockerclient)
|
|
||||||
|
|
||||||
This package presents a client for the Docker remote API. It also provides
|
|
||||||
support for the extensions in the [Swarm API](https://docs.docker.com/swarm/swarm-api/).
|
|
||||||
|
|
||||||
This package also provides support for docker's network API, which is a simple
|
|
||||||
passthrough to the libnetwork remote API. Note that docker's network API is
|
|
||||||
only available in docker 1.8 and above, and only enabled in docker if
|
|
||||||
DOCKER_EXPERIMENTAL is defined during the docker build process.
|
|
||||||
|
|
||||||
For more details, check the [remote API documentation](http://docs.docker.com/engine/reference/api/docker_remote_api/).
|
|
||||||
|
|
||||||
## Vendoring
|
|
||||||
|
|
||||||
If you are having issues with Go 1.5 and have `GO15VENDOREXPERIMENT` set with an application that has go-dockerclient vendored,
|
|
||||||
please update your vendoring of go-dockerclient :) We recently moved the `vendor` directory to `external` so that go-dockerclient
|
|
||||||
is compatible with this configuration. See [338](https://github.com/fsouza/go-dockerclient/issues/338) and [339](https://github.com/fsouza/go-dockerclient/pull/339)
|
|
||||||
for details.
|
|
||||||
|
|
||||||
## Example
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
endpoint := "unix:///var/run/docker.sock"
|
|
||||||
client, _ := docker.NewClient(endpoint)
|
|
||||||
imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
|
|
||||||
for _, img := range imgs {
|
|
||||||
fmt.Println("ID: ", img.ID)
|
|
||||||
fmt.Println("RepoTags: ", img.RepoTags)
|
|
||||||
fmt.Println("Created: ", img.Created)
|
|
||||||
fmt.Println("Size: ", img.Size)
|
|
||||||
fmt.Println("VirtualSize: ", img.VirtualSize)
|
|
||||||
fmt.Println("ParentId: ", img.ParentID)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## Using with TLS
|
|
||||||
|
|
||||||
In order to instantiate the client for a TLS-enabled daemon, you should use NewTLSClient, passing the endpoint and path for key and certificates as parameters.
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
endpoint := "tcp://[ip]:[port]"
|
|
||||||
path := os.Getenv("DOCKER_CERT_PATH")
|
|
||||||
ca := fmt.Sprintf("%s/ca.pem", path)
|
|
||||||
cert := fmt.Sprintf("%s/cert.pem", path)
|
|
||||||
key := fmt.Sprintf("%s/key.pem", path)
|
|
||||||
client, _ := docker.NewTLSClient(endpoint, cert, key, ca)
|
|
||||||
// use client
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
If using [docker-machine](https://docs.docker.com/machine/), or another application that exports environment variables
|
|
||||||
`DOCKER_HOST, DOCKER_TLS_VERIFY, DOCKER_CERT_PATH`, you can use NewClientFromEnv.
|
|
||||||
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
client, _ := docker.NewClientFromEnv()
|
|
||||||
// use client
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
See the documentation for more details.
|
|
||||||
|
|
||||||
## Developing
|
|
||||||
|
|
||||||
All development commands can be seen in the [Makefile](Makefile).
|
|
||||||
|
|
||||||
Commited code must pass:
|
|
||||||
|
|
||||||
* [golint](https://github.com/golang/lint)
|
|
||||||
* [go vet](https://godoc.org/golang.org/x/tools/cmd/vet)
|
|
||||||
* [gofmt](https://golang.org/cmd/gofmt)
|
|
||||||
* [go test](https://golang.org/cmd/go/#hdr-Test_packages)
|
|
||||||
|
|
||||||
Running `make test` will check all of these. If your editor does not automatically call gofmt, `make fmt` will format all go files in this repository.
|
|
||||||
138
vendor/github.com/fsouza/go-dockerclient/auth.go
generated
vendored
138
vendor/github.com/fsouza/go-dockerclient/auth.go
generated
vendored
@@ -1,138 +0,0 @@
|
|||||||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package docker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"encoding/base64"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ErrCannotParseDockercfg is the error returned by NewAuthConfigurations when the dockercfg cannot be parsed.
|
|
||||||
var ErrCannotParseDockercfg = errors.New("Failed to read authentication from dockercfg")
|
|
||||||
|
|
||||||
// AuthConfiguration represents authentication options to use in the PushImage
|
|
||||||
// method. It represents the authentication in the Docker index server.
|
|
||||||
type AuthConfiguration struct {
|
|
||||||
Username string `json:"username,omitempty"`
|
|
||||||
Password string `json:"password,omitempty"`
|
|
||||||
Email string `json:"email,omitempty"`
|
|
||||||
ServerAddress string `json:"serveraddress,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// AuthConfigurations represents authentication options to use for the
|
|
||||||
// PushImage method accommodating the new X-Registry-Config header
|
|
||||||
type AuthConfigurations struct {
|
|
||||||
Configs map[string]AuthConfiguration `json:"configs"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// AuthConfigurations119 is used to serialize a set of AuthConfigurations
|
|
||||||
// for Docker API >= 1.19.
|
|
||||||
type AuthConfigurations119 map[string]AuthConfiguration
|
|
||||||
|
|
||||||
// dockerConfig represents a registry authentation configuration from the
|
|
||||||
// .dockercfg file.
|
|
||||||
type dockerConfig struct {
|
|
||||||
Auth string `json:"auth"`
|
|
||||||
Email string `json:"email"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAuthConfigurationsFromDockerCfg returns AuthConfigurations from the
|
|
||||||
// ~/.dockercfg file.
|
|
||||||
func NewAuthConfigurationsFromDockerCfg() (*AuthConfigurations, error) {
|
|
||||||
var r io.Reader
|
|
||||||
var err error
|
|
||||||
p := path.Join(os.Getenv("HOME"), ".docker", "config.json")
|
|
||||||
r, err = os.Open(p)
|
|
||||||
if err != nil {
|
|
||||||
p := path.Join(os.Getenv("HOME"), ".dockercfg")
|
|
||||||
r, err = os.Open(p)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return NewAuthConfigurations(r)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewAuthConfigurations returns AuthConfigurations from a JSON encoded string in the
|
|
||||||
// same format as the .dockercfg file.
|
|
||||||
func NewAuthConfigurations(r io.Reader) (*AuthConfigurations, error) {
|
|
||||||
var auth *AuthConfigurations
|
|
||||||
confs, err := parseDockerConfig(r)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
auth, err = authConfigs(confs)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return auth, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseDockerConfig(r io.Reader) (map[string]dockerConfig, error) {
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
buf.ReadFrom(r)
|
|
||||||
byteData := buf.Bytes()
|
|
||||||
|
|
||||||
confsWrapper := struct {
|
|
||||||
Auths map[string]dockerConfig `json:"auths"`
|
|
||||||
}{}
|
|
||||||
if err := json.Unmarshal(byteData, &confsWrapper); err == nil {
|
|
||||||
if len(confsWrapper.Auths) > 0 {
|
|
||||||
return confsWrapper.Auths, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
var confs map[string]dockerConfig
|
|
||||||
if err := json.Unmarshal(byteData, &confs); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return confs, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// authConfigs converts a dockerConfigs map to a AuthConfigurations object.
|
|
||||||
func authConfigs(confs map[string]dockerConfig) (*AuthConfigurations, error) {
|
|
||||||
c := &AuthConfigurations{
|
|
||||||
Configs: make(map[string]AuthConfiguration),
|
|
||||||
}
|
|
||||||
for reg, conf := range confs {
|
|
||||||
data, err := base64.StdEncoding.DecodeString(conf.Auth)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
userpass := strings.SplitN(string(data), ":", 2)
|
|
||||||
if len(userpass) != 2 {
|
|
||||||
return nil, ErrCannotParseDockercfg
|
|
||||||
}
|
|
||||||
c.Configs[reg] = AuthConfiguration{
|
|
||||||
Email: conf.Email,
|
|
||||||
Username: userpass[0],
|
|
||||||
Password: userpass[1],
|
|
||||||
ServerAddress: reg,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return c, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// AuthCheck validates the given credentials. It returns nil if successful.
|
|
||||||
//
|
|
||||||
// See https://goo.gl/m2SleN for more details.
|
|
||||||
func (c *Client) AuthCheck(conf *AuthConfiguration) error {
|
|
||||||
if conf == nil {
|
|
||||||
return fmt.Errorf("conf is nil")
|
|
||||||
}
|
|
||||||
resp, err := c.do("POST", "/auth", doOptions{data: conf})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
resp.Body.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
43
vendor/github.com/fsouza/go-dockerclient/change.go
generated
vendored
43
vendor/github.com/fsouza/go-dockerclient/change.go
generated
vendored
@@ -1,43 +0,0 @@
|
|||||||
// Copyright 2014 go-dockerclient authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package docker
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// ChangeType is a type for constants indicating the type of change
// in a container
type ChangeType int

const (
	// ChangeModify is the ChangeType for container modifications
	ChangeModify ChangeType = iota

	// ChangeAdd is the ChangeType for additions to a container
	ChangeAdd

	// ChangeDelete is the ChangeType for deletions from a container
	ChangeDelete
)

// Change represents a change in a container.
//
// See https://goo.gl/9GsTIF for more details.
type Change struct {
	Path string
	Kind ChangeType
}

// String renders the change in `docker diff` style: a one-letter kind
// marker ("C", "A" or "D", empty for unknown kinds) followed by the path.
func (change *Change) String() string {
	labels := map[ChangeType]string{
		ChangeModify: "C",
		ChangeAdd:    "A",
		ChangeDelete: "D",
	}
	return fmt.Sprintf("%s %s", labels[change.Kind], change.Path)
}
|
|
||||||
930
vendor/github.com/fsouza/go-dockerclient/client.go
generated
vendored
930
vendor/github.com/fsouza/go-dockerclient/client.go
generated
vendored
@@ -1,930 +0,0 @@
|
|||||||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package docker provides a client for the Docker remote API.
|
|
||||||
//
|
|
||||||
// See https://goo.gl/G3plxW for more details on the remote API.
|
|
||||||
package docker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"bytes"
|
|
||||||
"crypto/tls"
|
|
||||||
"crypto/x509"
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httputil"
|
|
||||||
"net/url"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"reflect"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp"
|
|
||||||
)
|
|
||||||
|
|
||||||
const userAgent = "go-dockerclient"
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrInvalidEndpoint is returned when the endpoint is not a valid HTTP URL.
|
|
||||||
ErrInvalidEndpoint = errors.New("invalid endpoint")
|
|
||||||
|
|
||||||
// ErrConnectionRefused is returned when the client cannot connect to the given endpoint.
|
|
||||||
ErrConnectionRefused = errors.New("cannot connect to Docker endpoint")
|
|
||||||
|
|
||||||
apiVersion112, _ = NewAPIVersion("1.12")
|
|
||||||
|
|
||||||
apiVersion119, _ = NewAPIVersion("1.19")
|
|
||||||
)
|
|
||||||
|
|
||||||
// APIVersion is an internal representation of a version of the Remote API.
type APIVersion []int

// NewAPIVersion returns an instance of APIVersion for the given string.
//
// The given string must be in the form <major>.<minor>.<patch>, where <major>,
// <minor> and <patch> are integer numbers.
func NewAPIVersion(input string) (APIVersion, error) {
	if !strings.Contains(input, ".") {
		return nil, fmt.Errorf("Unable to parse version %q", input)
	}
	// Drop any "-suffix" (e.g. "1.19-rc1") before splitting on dots.
	base := strings.Split(input, "-")[0]
	parts := strings.Split(base, ".")
	version := make(APIVersion, len(parts))
	for i, part := range parts {
		num, err := strconv.Atoi(part)
		if err != nil {
			return nil, fmt.Errorf("Unable to parse version %q: %q is not an integer", input, part)
		}
		version[i] = num
	}
	return version, nil
}
|
|
||||||
|
|
||||||
func (version APIVersion) String() string {
|
|
||||||
var str string
|
|
||||||
for i, val := range version {
|
|
||||||
str += strconv.Itoa(val)
|
|
||||||
if i < len(version)-1 {
|
|
||||||
str += "."
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return str
|
|
||||||
}
|
|
||||||
|
|
||||||
// LessThan is a function for comparing APIVersion structs.
// It reports whether version sorts strictly before other.
func (version APIVersion) LessThan(other APIVersion) bool {
	return version.compare(other) < 0
}

// LessThanOrEqualTo is a function for comparing APIVersion structs.
// It reports whether version sorts before or equal to other.
func (version APIVersion) LessThanOrEqualTo(other APIVersion) bool {
	return version.compare(other) <= 0
}

// GreaterThan is a function for comparing APIVersion structs.
// It reports whether version sorts strictly after other.
func (version APIVersion) GreaterThan(other APIVersion) bool {
	return version.compare(other) > 0
}

// GreaterThanOrEqualTo is a function for comparing APIVersion structs.
// It reports whether version sorts after or equal to other.
func (version APIVersion) GreaterThanOrEqualTo(other APIVersion) bool {
	return version.compare(other) >= 0
}
|
|
||||||
|
|
||||||
func (version APIVersion) compare(other APIVersion) int {
|
|
||||||
for i, v := range version {
|
|
||||||
if i <= len(other)-1 {
|
|
||||||
otherVersion := other[i]
|
|
||||||
|
|
||||||
if v < otherVersion {
|
|
||||||
return -1
|
|
||||||
} else if v > otherVersion {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if len(version) > len(other) {
|
|
||||||
return 1
|
|
||||||
}
|
|
||||||
if len(version) < len(other) {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
return 0
|
|
||||||
}
|
|
||||||
|
|
||||||
// Client is the basic type of this package. It provides methods for
// interaction with the API.
type Client struct {
	// SkipServerVersionCheck disables the lazy "/version" probe that
	// otherwise runs before the first request (see checkAPIVersion).
	SkipServerVersionCheck bool
	// HTTPClient is used for requests over tcp/http(s) endpoints.
	HTTPClient *http.Client
	// TLSConfig holds the TLS settings used when dialing hijacked
	// connections (see hijack).
	TLSConfig *tls.Config
	// Dialer establishes raw connections (unix sockets, hijacked streams).
	Dialer *net.Dialer

	endpoint            string       // endpoint exactly as given by the caller
	endpointURL         *url.URL     // parsed form of endpoint
	eventMonitor        *eventMonitoringState // presumably shared state for event streaming — defined elsewhere in this package
	requestedAPIVersion APIVersion   // version explicitly requested by the caller, if any
	serverAPIVersion    APIVersion   // version reported by the server
	expectedAPIVersion  APIVersion   // requested version, or the server's when none was requested
	unixHTTPClient      *http.Client // lazily-built client for unix-socket endpoints
}
|
|
||||||
|
|
||||||
// NewClient returns a Client instance ready for communication with the given
|
|
||||||
// server endpoint. It will use the latest remote API version available in the
|
|
||||||
// server.
|
|
||||||
func NewClient(endpoint string) (*Client, error) {
|
|
||||||
client, err := NewVersionedClient(endpoint, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
client.SkipServerVersionCheck = true
|
|
||||||
return client, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTLSClient returns a Client instance ready for TLS communications with the givens
|
|
||||||
// server endpoint, key and certificates . It will use the latest remote API version
|
|
||||||
// available in the server.
|
|
||||||
func NewTLSClient(endpoint string, cert, key, ca string) (*Client, error) {
|
|
||||||
client, err := NewVersionedTLSClient(endpoint, cert, key, ca, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
client.SkipServerVersionCheck = true
|
|
||||||
return client, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
|
|
||||||
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
|
||||||
// read from a local file). It will use the latest remote API version available in the server.
|
|
||||||
func NewTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte) (*Client, error) {
|
|
||||||
client, err := NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, "")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
client.SkipServerVersionCheck = true
|
|
||||||
return client, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewVersionedClient returns a Client instance ready for communication with
|
|
||||||
// the given server endpoint, using a specific remote API version.
|
|
||||||
func NewVersionedClient(endpoint string, apiVersionString string) (*Client, error) {
|
|
||||||
u, err := parseEndpoint(endpoint, false)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var requestedAPIVersion APIVersion
|
|
||||||
if strings.Contains(apiVersionString, ".") {
|
|
||||||
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &Client{
|
|
||||||
HTTPClient: cleanhttp.DefaultClient(),
|
|
||||||
Dialer: &net.Dialer{},
|
|
||||||
endpoint: endpoint,
|
|
||||||
endpointURL: u,
|
|
||||||
eventMonitor: new(eventMonitoringState),
|
|
||||||
requestedAPIVersion: requestedAPIVersion,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewVersionnedTLSClient has been DEPRECATED, please use NewVersionedTLSClient.
//
// Deprecated: use NewVersionedTLSClient instead.
func NewVersionnedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
	return NewVersionedTLSClient(endpoint, cert, key, ca, apiVersionString)
}
|
|
||||||
|
|
||||||
// NewVersionedTLSClient returns a Client instance ready for TLS communications with the givens
|
|
||||||
// server endpoint, key and certificates, using a specific remote API version.
|
|
||||||
func NewVersionedTLSClient(endpoint string, cert, key, ca, apiVersionString string) (*Client, error) {
|
|
||||||
certPEMBlock, err := ioutil.ReadFile(cert)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
keyPEMBlock, err := ioutil.ReadFile(key)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
caPEMCert, err := ioutil.ReadFile(ca)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return NewVersionedTLSClientFromBytes(endpoint, certPEMBlock, keyPEMBlock, caPEMCert, apiVersionString)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewClientFromEnv returns a Client instance ready for communication created from
|
|
||||||
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH.
|
|
||||||
//
|
|
||||||
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
|
||||||
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
|
||||||
func NewClientFromEnv() (*Client, error) {
|
|
||||||
client, err := NewVersionedClientFromEnv("")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
client.SkipServerVersionCheck = true
|
|
||||||
return client, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewVersionedClientFromEnv returns a Client instance ready for TLS communications created from
|
|
||||||
// Docker's default logic for the environment variables DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH,
|
|
||||||
// and using a specific remote API version.
|
|
||||||
//
|
|
||||||
// See https://github.com/docker/docker/blob/1f963af697e8df3a78217f6fdbf67b8123a7db94/docker/docker.go#L68.
|
|
||||||
// See https://github.com/docker/compose/blob/81707ef1ad94403789166d2fe042c8a718a4c748/compose/cli/docker_client.py#L7.
|
|
||||||
func NewVersionedClientFromEnv(apiVersionString string) (*Client, error) {
|
|
||||||
dockerEnv, err := getDockerEnv()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
dockerHost := dockerEnv.dockerHost
|
|
||||||
if dockerEnv.dockerTLSVerify {
|
|
||||||
parts := strings.SplitN(dockerEnv.dockerHost, "://", 2)
|
|
||||||
if len(parts) != 2 {
|
|
||||||
return nil, fmt.Errorf("could not split %s into two parts by ://", dockerHost)
|
|
||||||
}
|
|
||||||
cert := filepath.Join(dockerEnv.dockerCertPath, "cert.pem")
|
|
||||||
key := filepath.Join(dockerEnv.dockerCertPath, "key.pem")
|
|
||||||
ca := filepath.Join(dockerEnv.dockerCertPath, "ca.pem")
|
|
||||||
return NewVersionedTLSClient(dockerEnv.dockerHost, cert, key, ca, apiVersionString)
|
|
||||||
}
|
|
||||||
return NewVersionedClient(dockerEnv.dockerHost, apiVersionString)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewVersionedTLSClientFromBytes returns a Client instance ready for TLS communications with the givens
|
|
||||||
// server endpoint, key and certificates (passed inline to the function as opposed to being
|
|
||||||
// read from a local file), using a specific remote API version.
|
|
||||||
func NewVersionedTLSClientFromBytes(endpoint string, certPEMBlock, keyPEMBlock, caPEMCert []byte, apiVersionString string) (*Client, error) {
|
|
||||||
u, err := parseEndpoint(endpoint, true)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
var requestedAPIVersion APIVersion
|
|
||||||
if strings.Contains(apiVersionString, ".") {
|
|
||||||
requestedAPIVersion, err = NewAPIVersion(apiVersionString)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if certPEMBlock == nil || keyPEMBlock == nil {
|
|
||||||
return nil, errors.New("Both cert and key are required")
|
|
||||||
}
|
|
||||||
tlsCert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
tlsConfig := &tls.Config{Certificates: []tls.Certificate{tlsCert}}
|
|
||||||
if caPEMCert == nil {
|
|
||||||
tlsConfig.InsecureSkipVerify = true
|
|
||||||
} else {
|
|
||||||
caPool := x509.NewCertPool()
|
|
||||||
if !caPool.AppendCertsFromPEM(caPEMCert) {
|
|
||||||
return nil, errors.New("Could not add RootCA pem")
|
|
||||||
}
|
|
||||||
tlsConfig.RootCAs = caPool
|
|
||||||
}
|
|
||||||
tr := cleanhttp.DefaultTransport()
|
|
||||||
tr.TLSClientConfig = tlsConfig
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &Client{
|
|
||||||
HTTPClient: &http.Client{Transport: tr},
|
|
||||||
TLSConfig: tlsConfig,
|
|
||||||
Dialer: &net.Dialer{},
|
|
||||||
endpoint: endpoint,
|
|
||||||
endpointURL: u,
|
|
||||||
eventMonitor: new(eventMonitoringState),
|
|
||||||
requestedAPIVersion: requestedAPIVersion,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) checkAPIVersion() error {
|
|
||||||
serverAPIVersionString, err := c.getServerAPIVersionString()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
c.serverAPIVersion, err = NewAPIVersion(serverAPIVersionString)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if c.requestedAPIVersion == nil {
|
|
||||||
c.expectedAPIVersion = c.serverAPIVersion
|
|
||||||
} else {
|
|
||||||
c.expectedAPIVersion = c.requestedAPIVersion
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Endpoint returns the current endpoint. It's useful for getting the endpoint
// when using functions that get this data from the environment (like
// NewClientFromEnv).
func (c *Client) Endpoint() string {
	return c.endpoint
}
|
|
||||||
|
|
||||||
// Ping pings the docker server
|
|
||||||
//
|
|
||||||
// See https://goo.gl/kQCfJj for more details.
|
|
||||||
func (c *Client) Ping() error {
|
|
||||||
path := "/_ping"
|
|
||||||
resp, err := c.do("GET", path, doOptions{})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return newError(resp)
|
|
||||||
}
|
|
||||||
resp.Body.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) getServerAPIVersionString() (version string, err error) {
|
|
||||||
resp, err := c.do("GET", "/version", doOptions{})
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
if resp.StatusCode != http.StatusOK {
|
|
||||||
return "", fmt.Errorf("Received unexpected status %d while trying to retrieve the server version", resp.StatusCode)
|
|
||||||
}
|
|
||||||
var versionResponse map[string]interface{}
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&versionResponse); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
if version, ok := (versionResponse["ApiVersion"]).(string); ok {
|
|
||||||
return version, nil
|
|
||||||
}
|
|
||||||
return "", nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// doOptions carries the optional parts of an HTTP request made via do.
type doOptions struct {
	data      interface{}       // request body, marshaled to JSON when non-nil
	forceJSON bool              // marshal data even when it is nil (sends "null")
	headers   map[string]string // extra headers set on the request
}

// do performs an HTTP request against the Docker endpoint and returns the
// response. Responses with a status outside [200, 400) are converted into a
// *Error (whose constructor consumes and closes the body); callers must
// close the body of successful responses themselves.
func (c *Client) do(method, path string, doOptions doOptions) (*http.Response, error) {
	var params io.Reader
	if doOptions.data != nil || doOptions.forceJSON {
		buf, err := json.Marshal(doOptions.data)
		if err != nil {
			return nil, err
		}
		params = bytes.NewBuffer(buf)
	}
	// Lazily discover the server API version before the first real request.
	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
		err := c.checkAPIVersion()
		if err != nil {
			return nil, err
		}
	}
	httpClient := c.HTTPClient
	protocol := c.endpointURL.Scheme
	var u string
	if protocol == "unix" {
		// Unix sockets need a custom transport and a placeholder URL.
		httpClient = c.unixClient()
		u = c.getFakeUnixURL(path)
	} else {
		u = c.getURL(path)
	}
	req, err := http.NewRequest(method, u, params)
	if err != nil {
		return nil, err
	}
	req.Header.Set("User-Agent", userAgent)
	if doOptions.data != nil {
		req.Header.Set("Content-Type", "application/json")
	} else if method == "POST" {
		req.Header.Set("Content-Type", "plain/text")
	}

	for k, v := range doOptions.headers {
		req.Header.Set(k, v)
	}
	resp, err := httpClient.Do(req)
	if err != nil {
		// Normalize refused connections into a sentinel error.
		if strings.Contains(err.Error(), "connection refused") {
			return nil, ErrConnectionRefused
		}
		return nil, err
	}
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return nil, newError(resp)
	}
	return resp, nil
}
|
|
||||||
|
|
||||||
// streamOptions carries the options for stream, which serves endpoints that
// produce a continuous response body (logs, pull/push progress, exports).
type streamOptions struct {
	setRawTerminal bool              // copy stdout verbatim instead of demultiplexing
	rawJSONStream  bool              // pass the JSON stream through without decoding
	useJSONDecoder bool              // force JSON decoding regardless of Content-Type
	headers        map[string]string // extra request headers
	in             io.Reader         // request body
	stdout         io.Writer         // destination for stream/stdout data
	stderr         io.Writer         // destination for stderr data
	// timeout is the initial connection timeout
	timeout time.Duration
}

// stream performs a request and copies or decodes the streamed response body
// into the configured writers until EOF or error. Over unix sockets it
// speaks HTTP manually on a raw connection so a read deadline can be set.
func (c *Client) stream(method, path string, streamOptions streamOptions) error {
	// POST/PUT require a non-nil body for the manual request writing below.
	if (method == "POST" || method == "PUT") && streamOptions.in == nil {
		streamOptions.in = bytes.NewReader(nil)
	}
	// Lazily discover the server API version before the first real request.
	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
		err := c.checkAPIVersion()
		if err != nil {
			return err
		}
	}
	req, err := http.NewRequest(method, c.getURL(path), streamOptions.in)
	if err != nil {
		return err
	}
	req.Header.Set("User-Agent", userAgent)
	if method == "POST" {
		req.Header.Set("Content-Type", "plain/text")
	}
	for key, val := range streamOptions.headers {
		req.Header.Set(key, val)
	}
	var resp *http.Response
	protocol := c.endpointURL.Scheme
	address := c.endpointURL.Path
	if streamOptions.stdout == nil {
		streamOptions.stdout = ioutil.Discard
	}
	if streamOptions.stderr == nil {
		streamOptions.stderr = ioutil.Discard
	}
	if protocol == "unix" {
		dial, err := c.Dialer.Dial(protocol, address)
		if err != nil {
			return err
		}
		defer dial.Close()
		breader := bufio.NewReader(dial)
		err = req.Write(dial)
		if err != nil {
			return err
		}

		// ReadResponse may hang if server does not reply in time.
		if streamOptions.timeout > 0 {
			dial.SetDeadline(time.Now().Add(streamOptions.timeout))
		}

		if resp, err = http.ReadResponse(breader, req); err != nil {
			// Cancel timeout for future I/O operations
			if streamOptions.timeout > 0 {
				dial.SetDeadline(time.Time{})
			}
			if strings.Contains(err.Error(), "connection refused") {
				return ErrConnectionRefused
			}
			return err
		}
	} else {
		if resp, err = c.HTTPClient.Do(req); err != nil {
			if strings.Contains(err.Error(), "connection refused") {
				return ErrConnectionRefused
			}
			return err
		}
	}
	defer resp.Body.Close()
	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
		return newError(resp)
	}
	if streamOptions.useJSONDecoder || resp.Header.Get("Content-Type") == "application/json" {
		// if we want to get raw json stream, just copy it back to output
		// without decoding it
		if streamOptions.rawJSONStream {
			_, err = io.Copy(streamOptions.stdout, resp.Body)
			return err
		}
		// Decode progress messages one at a time until EOF; an embedded
		// "error" field aborts the stream.
		dec := json.NewDecoder(resp.Body)
		for {
			var m jsonMessage
			if err := dec.Decode(&m); err == io.EOF {
				break
			} else if err != nil {
				return err
			}
			if m.Stream != "" {
				fmt.Fprint(streamOptions.stdout, m.Stream)
			} else if m.Progress != "" {
				fmt.Fprintf(streamOptions.stdout, "%s %s\r", m.Status, m.Progress)
			} else if m.Error != "" {
				return errors.New(m.Error)
			}
			if m.Status != "" {
				fmt.Fprintln(streamOptions.stdout, m.Status)
			}
		}
	} else {
		if streamOptions.setRawTerminal {
			_, err = io.Copy(streamOptions.stdout, resp.Body)
		} else {
			// Demultiplex docker's stdout/stderr framing.
			_, err = stdcopy.StdCopy(streamOptions.stdout, streamOptions.stderr, resp.Body)
		}
		return err
	}
	return nil
}
|
|
||||||
|
|
||||||
// hijackOptions carries the options for hijack.
type hijackOptions struct {
	success        chan struct{} // if non-nil, signaled (then waited on) once the request is sent
	setRawTerminal bool          // copy output verbatim instead of demultiplexing
	in             io.Reader     // data copied to the remote side (stdin)
	stdout         io.Writer     // destination for stdout data
	stderr         io.Writer     // destination for stderr data
	data           interface{}   // request body, marshaled to JSON
}

// CloseWaiter is an interface with methods for closing the underlying resource
// and then waiting for it to finish processing.
type CloseWaiter interface {
	io.Closer
	Wait() error
}

// waiterFunc adapts a plain function to the Wait half of CloseWaiter.
type waiterFunc func() error

func (w waiterFunc) Wait() error { return w() }

// closerFunc adapts a plain function to io.Closer.
type closerFunc func() error

func (c closerFunc) Close() error { return c() }

// hijack upgrades an HTTP request into a raw bidirectional stream (used for
// attach/exec-style endpoints). It returns a CloseWaiter whose Close aborts
// the background copy goroutines and whose Wait blocks until both directions
// are done, returning the input-side error first when both fail.
func (c *Client) hijack(method, path string, hijackOptions hijackOptions) (CloseWaiter, error) {
	// Lazily discover the server API version before the first real request.
	if path != "/version" && !c.SkipServerVersionCheck && c.expectedAPIVersion == nil {
		err := c.checkAPIVersion()
		if err != nil {
			return nil, err
		}
	}
	var params io.Reader
	if hijackOptions.data != nil {
		buf, err := json.Marshal(hijackOptions.data)
		if err != nil {
			return nil, err
		}
		params = bytes.NewBuffer(buf)
	}
	req, err := http.NewRequest(method, c.getURL(path), params)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Connection", "Upgrade")
	req.Header.Set("Upgrade", "tcp")
	protocol := c.endpointURL.Scheme
	address := c.endpointURL.Path
	if protocol != "unix" {
		protocol = "tcp"
		address = c.endpointURL.Host
	}
	var dial net.Conn
	if c.TLSConfig != nil && protocol != "unix" {
		dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
		if err != nil {
			return nil, err
		}
	} else {
		dial, err = c.Dialer.Dial(protocol, address)
		if err != nil {
			return nil, err
		}
	}

	errs := make(chan error)
	quit := make(chan struct{})
	go func() {
		clientconn := httputil.NewClientConn(dial, nil)
		defer clientconn.Close()
		// The Do error is deliberately ignored: hijacked connections make
		// ClientConn report an error even on success.
		clientconn.Do(req)
		if hijackOptions.success != nil {
			// Handshake with the caller: signal readiness, wait for go-ahead.
			hijackOptions.success <- struct{}{}
			<-hijackOptions.success
		}
		rwc, br := clientconn.Hijack()
		defer rwc.Close()

		errChanOut := make(chan error, 1)
		errChanIn := make(chan error, 1)
		if hijackOptions.stdout == nil && hijackOptions.stderr == nil {
			close(errChanOut)
		} else {
			// Only copy if hijackOptions.stdout and/or hijackOptions.stderr is actually set.
			// Otherwise, if the only stream you care about is stdin, your attach session
			// will "hang" until the container terminates, even though you're not reading
			// stdout/stderr
			if hijackOptions.stdout == nil {
				hijackOptions.stdout = ioutil.Discard
			}
			if hijackOptions.stderr == nil {
				hijackOptions.stderr = ioutil.Discard
			}

			go func() {
				defer func() {
					// When output finishes, close the input reader (if any)
					// so the input goroutine can unblock.
					if hijackOptions.in != nil {
						if closer, ok := hijackOptions.in.(io.Closer); ok {
							closer.Close()
						}
						errChanIn <- nil
					}
				}()

				var err error
				if hijackOptions.setRawTerminal {
					_, err = io.Copy(hijackOptions.stdout, br)
				} else {
					_, err = stdcopy.StdCopy(hijackOptions.stdout, hijackOptions.stderr, br)
				}
				errChanOut <- err
			}()
		}

		go func() {
			var err error
			if hijackOptions.in != nil {
				_, err = io.Copy(rwc, hijackOptions.in)
			}
			errChanIn <- err
			// Half-close the write side so the server sees EOF on stdin.
			rwc.(interface {
				CloseWrite() error
			}).CloseWrite()
		}()

		var errIn error
		select {
		case errIn = <-errChanIn:
		case <-quit:
			return
		}

		var errOut error
		select {
		case errOut = <-errChanOut:
		case <-quit:
			return
		}

		// Prefer the input-side error when both directions failed.
		if errIn != nil {
			errs <- errIn
		} else {
			errs <- errOut
		}
	}()

	return struct {
		closerFunc
		waiterFunc
	}{
		closerFunc(func() error { close(quit); return nil }),
		waiterFunc(func() error { return <-errs }),
	}, nil
}
|
|
||||||
|
|
||||||
func (c *Client) getURL(path string) string {
|
|
||||||
urlStr := strings.TrimRight(c.endpointURL.String(), "/")
|
|
||||||
if c.endpointURL.Scheme == "unix" {
|
|
||||||
urlStr = ""
|
|
||||||
}
|
|
||||||
if c.requestedAPIVersion != nil {
|
|
||||||
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s%s", urlStr, path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getFakeUnixURL returns the URL needed to make an HTTP request over a UNIX
|
|
||||||
// domain socket to the given path.
|
|
||||||
func (c *Client) getFakeUnixURL(path string) string {
|
|
||||||
u := *c.endpointURL // Copy.
|
|
||||||
|
|
||||||
// Override URL so that net/http will not complain.
|
|
||||||
u.Scheme = "http"
|
|
||||||
u.Host = "unix.sock" // Doesn't matter what this is - it's not used.
|
|
||||||
u.Path = ""
|
|
||||||
urlStr := strings.TrimRight(u.String(), "/")
|
|
||||||
if c.requestedAPIVersion != nil {
|
|
||||||
return fmt.Sprintf("%s/v%s%s", urlStr, c.requestedAPIVersion, path)
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s%s", urlStr, path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// unixClient returns an http.Client whose transport tunnels every request
// through the unix socket named by the endpoint URL's path, building and
// caching the client on first use.
//
// NOTE(review): the lazy initialization is not synchronized; concurrent
// first calls could each build a transport — confirm callers serialize
// access or that the race is acceptable.
func (c *Client) unixClient() *http.Client {
	if c.unixHTTPClient != nil {
		return c.unixHTTPClient
	}
	socketPath := c.endpointURL.Path
	tr := &http.Transport{
		// Ignore the requested network/address: always dial the socket.
		Dial: func(network, addr string) (net.Conn, error) {
			return c.Dialer.Dial("unix", socketPath)
		},
	}
	cleanhttp.SetTransportFinalizer(tr)
	c.unixHTTPClient = &http.Client{Transport: tr}
	return c.unixHTTPClient
}
|
|
||||||
|
|
||||||
// jsonMessage mirrors the progress/status objects emitted by streaming
// Docker endpoints (pull, push, build); consumed by stream.
type jsonMessage struct {
	Status   string `json:"status,omitempty"`
	Progress string `json:"progress,omitempty"`
	Error    string `json:"error,omitempty"`
	Stream   string `json:"stream,omitempty"`
}
|
|
||||||
|
|
||||||
// queryString encodes the exported fields of opts (a struct or pointer to
// struct) as URL query parameters. Field names are lowercased unless a
// `qs` tag overrides them; a tag of "-" skips the field. Nil or non-struct
// inputs yield the empty string.
func queryString(opts interface{}) string {
	if opts == nil {
		return ""
	}
	value := reflect.ValueOf(opts)
	if value.Kind() == reflect.Ptr {
		value = value.Elem()
	}
	if value.Kind() != reflect.Struct {
		return ""
	}
	items := url.Values{}
	structType := value.Type()
	for i := 0; i < structType.NumField(); i++ {
		field := structType.Field(i)
		if field.PkgPath != "" {
			// Unexported field.
			continue
		}
		key := field.Tag.Get("qs")
		switch key {
		case "-":
			continue
		case "":
			key = strings.ToLower(field.Name)
		}
		addQueryStringValue(items, key, value.Field(i))
	}
	return items.Encode()
}

// addQueryStringValue appends v under key following docker's conventions:
// zero-ish values (false, non-positive numbers, "", empty collections) are
// omitted, bools encode as "1", pointers and maps encode as JSON, and
// slices/arrays contribute one entry per element.
func addQueryStringValue(items url.Values, key string, v reflect.Value) {
	switch v.Kind() {
	case reflect.Bool:
		if v.Bool() {
			items.Add(key, "1")
		}
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		if n := v.Int(); n > 0 {
			items.Add(key, strconv.FormatInt(n, 10))
		}
	case reflect.Float32, reflect.Float64:
		if f := v.Float(); f > 0 {
			items.Add(key, strconv.FormatFloat(f, 'f', -1, 64))
		}
	case reflect.String:
		if s := v.String(); s != "" {
			items.Add(key, s)
		}
	case reflect.Ptr:
		if !v.IsNil() {
			if encoded, err := json.Marshal(v.Interface()); err == nil {
				items.Add(key, string(encoded))
			}
		}
	case reflect.Map:
		if len(v.MapKeys()) > 0 {
			if encoded, err := json.Marshal(v.Interface()); err == nil {
				items.Add(key, string(encoded))
			}
		}
	case reflect.Array, reflect.Slice:
		for i, n := 0, v.Len(); i < n; i++ {
			addQueryStringValue(items, key, v.Index(i))
		}
	}
}
|
|
||||||
|
|
||||||
// Error represents failures in the API. It represents a failure from the API.
|
|
||||||
type Error struct {
|
|
||||||
Status int
|
|
||||||
Message string
|
|
||||||
}
|
|
||||||
|
|
||||||
func newError(resp *http.Response) *Error {
|
|
||||||
defer resp.Body.Close()
|
|
||||||
data, err := ioutil.ReadAll(resp.Body)
|
|
||||||
if err != nil {
|
|
||||||
return &Error{Status: resp.StatusCode, Message: fmt.Sprintf("cannot read body, err: %v", err)}
|
|
||||||
}
|
|
||||||
return &Error{Status: resp.StatusCode, Message: string(data)}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Error) Error() string {
|
|
||||||
return fmt.Sprintf("API error (%d): %s", e.Status, e.Message)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseEndpoint(endpoint string, tls bool) (*url.URL, error) {
|
|
||||||
if endpoint != "" && !strings.Contains(endpoint, "://") {
|
|
||||||
endpoint = "tcp://" + endpoint
|
|
||||||
}
|
|
||||||
u, err := url.Parse(endpoint)
|
|
||||||
if err != nil {
|
|
||||||
return nil, ErrInvalidEndpoint
|
|
||||||
}
|
|
||||||
if tls {
|
|
||||||
u.Scheme = "https"
|
|
||||||
}
|
|
||||||
switch u.Scheme {
|
|
||||||
case "unix":
|
|
||||||
return u, nil
|
|
||||||
case "http", "https", "tcp":
|
|
||||||
_, port, err := net.SplitHostPort(u.Host)
|
|
||||||
if err != nil {
|
|
||||||
if e, ok := err.(*net.AddrError); ok {
|
|
||||||
if e.Err == "missing port in address" {
|
|
||||||
return u, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil, ErrInvalidEndpoint
|
|
||||||
}
|
|
||||||
number, err := strconv.ParseInt(port, 10, 64)
|
|
||||||
if err == nil && number > 0 && number < 65536 {
|
|
||||||
if u.Scheme == "tcp" {
|
|
||||||
if tls {
|
|
||||||
u.Scheme = "https"
|
|
||||||
} else {
|
|
||||||
u.Scheme = "http"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return u, nil
|
|
||||||
}
|
|
||||||
return nil, ErrInvalidEndpoint
|
|
||||||
default:
|
|
||||||
return nil, ErrInvalidEndpoint
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type dockerEnv struct {
|
|
||||||
dockerHost string
|
|
||||||
dockerTLSVerify bool
|
|
||||||
dockerCertPath string
|
|
||||||
}
|
|
||||||
|
|
||||||
func getDockerEnv() (*dockerEnv, error) {
|
|
||||||
dockerHost := os.Getenv("DOCKER_HOST")
|
|
||||||
var err error
|
|
||||||
if dockerHost == "" {
|
|
||||||
dockerHost, err = DefaultDockerHost()
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") != ""
|
|
||||||
var dockerCertPath string
|
|
||||||
if dockerTLSVerify {
|
|
||||||
dockerCertPath = os.Getenv("DOCKER_CERT_PATH")
|
|
||||||
if dockerCertPath == "" {
|
|
||||||
home := homedir.Get()
|
|
||||||
if home == "" {
|
|
||||||
return nil, errors.New("environment variable HOME must be set if DOCKER_CERT_PATH is not set")
|
|
||||||
}
|
|
||||||
dockerCertPath = filepath.Join(home, ".docker")
|
|
||||||
dockerCertPath, err = filepath.Abs(dockerCertPath)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return &dockerEnv{
|
|
||||||
dockerHost: dockerHost,
|
|
||||||
dockerTLSVerify: dockerTLSVerify,
|
|
||||||
dockerCertPath: dockerCertPath,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// DefaultDockerHost returns the default docker socket for the current OS
|
|
||||||
func DefaultDockerHost() (string, error) {
|
|
||||||
var defaultHost string
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
// If we do not have a host, default to TCP socket on Windows
|
|
||||||
defaultHost = fmt.Sprintf("tcp://%s:%d", opts.DefaultHTTPHost, opts.DefaultHTTPPort)
|
|
||||||
} else {
|
|
||||||
// If we do not have a host, default to unix socket
|
|
||||||
defaultHost = fmt.Sprintf("unix://%s", opts.DefaultUnixSocket)
|
|
||||||
}
|
|
||||||
return opts.ValidateHost(defaultHost)
|
|
||||||
}
|
|
||||||
1288
vendor/github.com/fsouza/go-dockerclient/container.go
generated
vendored
1288
vendor/github.com/fsouza/go-dockerclient/container.go
generated
vendored
File diff suppressed because it is too large
Load Diff
168
vendor/github.com/fsouza/go-dockerclient/env.go
generated
vendored
168
vendor/github.com/fsouza/go-dockerclient/env.go
generated
vendored
@@ -1,168 +0,0 @@
|
|||||||
// Copyright 2014 Docker authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the DOCKER-LICENSE file.
|
|
||||||
|
|
||||||
package docker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Env represents a list of key-pair represented in the form KEY=VALUE.
|
|
||||||
type Env []string
|
|
||||||
|
|
||||||
// Get returns the string value of the given key.
|
|
||||||
func (env *Env) Get(key string) (value string) {
|
|
||||||
return env.Map()[key]
|
|
||||||
}
|
|
||||||
|
|
||||||
// Exists checks whether the given key is defined in the internal Env
|
|
||||||
// representation.
|
|
||||||
func (env *Env) Exists(key string) bool {
|
|
||||||
_, exists := env.Map()[key]
|
|
||||||
return exists
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetBool returns a boolean representation of the given key. The key is false
|
|
||||||
// whenever its value if 0, no, false, none or an empty string. Any other value
|
|
||||||
// will be interpreted as true.
|
|
||||||
func (env *Env) GetBool(key string) (value bool) {
|
|
||||||
s := strings.ToLower(strings.Trim(env.Get(key), " \t"))
|
|
||||||
if s == "" || s == "0" || s == "no" || s == "false" || s == "none" {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetBool defines a boolean value to the given key.
|
|
||||||
func (env *Env) SetBool(key string, value bool) {
|
|
||||||
if value {
|
|
||||||
env.Set(key, "1")
|
|
||||||
} else {
|
|
||||||
env.Set(key, "0")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetInt returns the value of the provided key, converted to int.
|
|
||||||
//
|
|
||||||
// It the value cannot be represented as an integer, it returns -1.
|
|
||||||
func (env *Env) GetInt(key string) int {
|
|
||||||
return int(env.GetInt64(key))
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetInt defines an integer value to the given key.
|
|
||||||
func (env *Env) SetInt(key string, value int) {
|
|
||||||
env.Set(key, strconv.Itoa(value))
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetInt64 returns the value of the provided key, converted to int64.
|
|
||||||
//
|
|
||||||
// It the value cannot be represented as an integer, it returns -1.
|
|
||||||
func (env *Env) GetInt64(key string) int64 {
|
|
||||||
s := strings.Trim(env.Get(key), " \t")
|
|
||||||
val, err := strconv.ParseInt(s, 10, 64)
|
|
||||||
if err != nil {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
return val
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetInt64 defines an integer (64-bit wide) value to the given key.
|
|
||||||
func (env *Env) SetInt64(key string, value int64) {
|
|
||||||
env.Set(key, strconv.FormatInt(value, 10))
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetJSON unmarshals the value of the provided key in the provided iface.
|
|
||||||
//
|
|
||||||
// iface is a value that can be provided to the json.Unmarshal function.
|
|
||||||
func (env *Env) GetJSON(key string, iface interface{}) error {
|
|
||||||
sval := env.Get(key)
|
|
||||||
if sval == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return json.Unmarshal([]byte(sval), iface)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetJSON marshals the given value to JSON format and stores it using the
|
|
||||||
// provided key.
|
|
||||||
func (env *Env) SetJSON(key string, value interface{}) error {
|
|
||||||
sval, err := json.Marshal(value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
env.Set(key, string(sval))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetList returns a list of strings matching the provided key. It handles the
|
|
||||||
// list as a JSON representation of a list of strings.
|
|
||||||
//
|
|
||||||
// If the given key matches to a single string, it will return a list
|
|
||||||
// containing only the value that matches the key.
|
|
||||||
func (env *Env) GetList(key string) []string {
|
|
||||||
sval := env.Get(key)
|
|
||||||
if sval == "" {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
var l []string
|
|
||||||
if err := json.Unmarshal([]byte(sval), &l); err != nil {
|
|
||||||
l = append(l, sval)
|
|
||||||
}
|
|
||||||
return l
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetList stores the given list in the provided key, after serializing it to
|
|
||||||
// JSON format.
|
|
||||||
func (env *Env) SetList(key string, value []string) error {
|
|
||||||
return env.SetJSON(key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set defines the value of a key to the given string.
|
|
||||||
func (env *Env) Set(key, value string) {
|
|
||||||
*env = append(*env, key+"="+value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Decode decodes `src` as a json dictionary, and adds each decoded key-value
|
|
||||||
// pair to the environment.
|
|
||||||
//
|
|
||||||
// If `src` cannot be decoded as a json dictionary, an error is returned.
|
|
||||||
func (env *Env) Decode(src io.Reader) error {
|
|
||||||
m := make(map[string]interface{})
|
|
||||||
if err := json.NewDecoder(src).Decode(&m); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for k, v := range m {
|
|
||||||
env.SetAuto(k, v)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetAuto will try to define the Set* method to call based on the given value.
|
|
||||||
func (env *Env) SetAuto(key string, value interface{}) {
|
|
||||||
if fval, ok := value.(float64); ok {
|
|
||||||
env.SetInt64(key, int64(fval))
|
|
||||||
} else if sval, ok := value.(string); ok {
|
|
||||||
env.Set(key, sval)
|
|
||||||
} else if val, err := json.Marshal(value); err == nil {
|
|
||||||
env.Set(key, string(val))
|
|
||||||
} else {
|
|
||||||
env.Set(key, fmt.Sprintf("%v", value))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Map returns the map representation of the env.
|
|
||||||
func (env *Env) Map() map[string]string {
|
|
||||||
if len(*env) == 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
m := make(map[string]string)
|
|
||||||
for _, kv := range *env {
|
|
||||||
parts := strings.SplitN(kv, "=", 2)
|
|
||||||
m[parts[0]] = parts[1]
|
|
||||||
}
|
|
||||||
return m
|
|
||||||
}
|
|
||||||
370
vendor/github.com/fsouza/go-dockerclient/event.go
generated
vendored
370
vendor/github.com/fsouza/go-dockerclient/event.go
generated
vendored
@@ -1,370 +0,0 @@
|
|||||||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package docker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"math"
|
|
||||||
"net"
|
|
||||||
"net/http"
|
|
||||||
"net/http/httputil"
|
|
||||||
"sync"
|
|
||||||
"sync/atomic"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// APIEvents represents events coming from the Docker API
|
|
||||||
// The fields in the Docker API changed in API version 1.22, and
|
|
||||||
// events for more than images and containers are now fired off.
|
|
||||||
// To maintain forward and backward compatibility, go-dockerclient
|
|
||||||
// replicates the event in both the new and old format as faithfully as possible.
|
|
||||||
//
|
|
||||||
// For events that only exist in 1.22 in later, `Status` is filled in as
|
|
||||||
// `"Type:Action"` instead of just `Action` to allow for older clients to
|
|
||||||
// differentiate and not break if they rely on the pre-1.22 Status types.
|
|
||||||
//
|
|
||||||
// The transformEvent method can be consulted for more information about how
|
|
||||||
// events are translated from new/old API formats
|
|
||||||
type APIEvents struct {
|
|
||||||
// New API Fields in 1.22
|
|
||||||
Action string `json:"action,omitempty"`
|
|
||||||
Type string `json:"type,omitempty"`
|
|
||||||
Actor APIActor `json:"actor,omitempty"`
|
|
||||||
|
|
||||||
// Old API fields for < 1.22
|
|
||||||
Status string `json:"status,omitempty"`
|
|
||||||
ID string `json:"id,omitempty"`
|
|
||||||
From string `json:"from,omitempty"`
|
|
||||||
|
|
||||||
// Fields in both
|
|
||||||
Time int64 `json:"time,omitempty"`
|
|
||||||
TimeNano int64 `json:"timeNano,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// APIActor represents an actor that accomplishes something for an event
|
|
||||||
type APIActor struct {
|
|
||||||
ID string `json:"id,omitempty"`
|
|
||||||
Attributes map[string]string `json:"attributes,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type eventMonitoringState struct {
|
|
||||||
sync.RWMutex
|
|
||||||
sync.WaitGroup
|
|
||||||
enabled bool
|
|
||||||
lastSeen *int64
|
|
||||||
C chan *APIEvents
|
|
||||||
errC chan error
|
|
||||||
listeners []chan<- *APIEvents
|
|
||||||
}
|
|
||||||
|
|
||||||
const (
|
|
||||||
maxMonitorConnRetries = 5
|
|
||||||
retryInitialWaitTime = 10.
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// ErrNoListeners is the error returned when no listeners are available
|
|
||||||
// to receive an event.
|
|
||||||
ErrNoListeners = errors.New("no listeners present to receive event")
|
|
||||||
|
|
||||||
// ErrListenerAlreadyExists is the error returned when the listerner already
|
|
||||||
// exists.
|
|
||||||
ErrListenerAlreadyExists = errors.New("listener already exists for docker events")
|
|
||||||
|
|
||||||
// EOFEvent is sent when the event listener receives an EOF error.
|
|
||||||
EOFEvent = &APIEvents{
|
|
||||||
Type: "EOF",
|
|
||||||
Status: "EOF",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
// AddEventListener adds a new listener to container events in the Docker API.
|
|
||||||
//
|
|
||||||
// The parameter is a channel through which events will be sent.
|
|
||||||
func (c *Client) AddEventListener(listener chan<- *APIEvents) error {
|
|
||||||
var err error
|
|
||||||
if !c.eventMonitor.isEnabled() {
|
|
||||||
err = c.eventMonitor.enableEventMonitoring(c)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err = c.eventMonitor.addListener(listener)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveEventListener removes a listener from the monitor.
|
|
||||||
func (c *Client) RemoveEventListener(listener chan *APIEvents) error {
|
|
||||||
err := c.eventMonitor.removeListener(listener)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if len(c.eventMonitor.listeners) == 0 {
|
|
||||||
c.eventMonitor.disableEventMonitoring()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) addListener(listener chan<- *APIEvents) error {
|
|
||||||
eventState.Lock()
|
|
||||||
defer eventState.Unlock()
|
|
||||||
if listenerExists(listener, &eventState.listeners) {
|
|
||||||
return ErrListenerAlreadyExists
|
|
||||||
}
|
|
||||||
eventState.Add(1)
|
|
||||||
eventState.listeners = append(eventState.listeners, listener)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) removeListener(listener chan<- *APIEvents) error {
|
|
||||||
eventState.Lock()
|
|
||||||
defer eventState.Unlock()
|
|
||||||
if listenerExists(listener, &eventState.listeners) {
|
|
||||||
var newListeners []chan<- *APIEvents
|
|
||||||
for _, l := range eventState.listeners {
|
|
||||||
if l != listener {
|
|
||||||
newListeners = append(newListeners, l)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
eventState.listeners = newListeners
|
|
||||||
eventState.Add(-1)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) closeListeners() {
|
|
||||||
for _, l := range eventState.listeners {
|
|
||||||
close(l)
|
|
||||||
eventState.Add(-1)
|
|
||||||
}
|
|
||||||
eventState.listeners = nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func listenerExists(a chan<- *APIEvents, list *[]chan<- *APIEvents) bool {
|
|
||||||
for _, b := range *list {
|
|
||||||
if b == a {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) enableEventMonitoring(c *Client) error {
|
|
||||||
eventState.Lock()
|
|
||||||
defer eventState.Unlock()
|
|
||||||
if !eventState.enabled {
|
|
||||||
eventState.enabled = true
|
|
||||||
var lastSeenDefault = int64(0)
|
|
||||||
eventState.lastSeen = &lastSeenDefault
|
|
||||||
eventState.C = make(chan *APIEvents, 100)
|
|
||||||
eventState.errC = make(chan error, 1)
|
|
||||||
go eventState.monitorEvents(c)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) disableEventMonitoring() error {
|
|
||||||
eventState.Lock()
|
|
||||||
defer eventState.Unlock()
|
|
||||||
|
|
||||||
eventState.closeListeners()
|
|
||||||
|
|
||||||
eventState.Wait()
|
|
||||||
|
|
||||||
if eventState.enabled {
|
|
||||||
eventState.enabled = false
|
|
||||||
close(eventState.C)
|
|
||||||
close(eventState.errC)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) monitorEvents(c *Client) {
|
|
||||||
var err error
|
|
||||||
for eventState.noListeners() {
|
|
||||||
time.Sleep(10 * time.Millisecond)
|
|
||||||
}
|
|
||||||
if err = eventState.connectWithRetry(c); err != nil {
|
|
||||||
// terminate if connect failed
|
|
||||||
eventState.disableEventMonitoring()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for eventState.isEnabled() {
|
|
||||||
timeout := time.After(100 * time.Millisecond)
|
|
||||||
select {
|
|
||||||
case ev, ok := <-eventState.C:
|
|
||||||
if !ok {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if ev == EOFEvent {
|
|
||||||
eventState.disableEventMonitoring()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
eventState.updateLastSeen(ev)
|
|
||||||
go eventState.sendEvent(ev)
|
|
||||||
case err = <-eventState.errC:
|
|
||||||
if err == ErrNoListeners {
|
|
||||||
eventState.disableEventMonitoring()
|
|
||||||
return
|
|
||||||
} else if err != nil {
|
|
||||||
defer func() { go eventState.monitorEvents(c) }()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
case <-timeout:
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) connectWithRetry(c *Client) error {
|
|
||||||
var retries int
|
|
||||||
var err error
|
|
||||||
for err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC); err != nil && retries < maxMonitorConnRetries; retries++ {
|
|
||||||
waitTime := int64(retryInitialWaitTime * math.Pow(2, float64(retries)))
|
|
||||||
time.Sleep(time.Duration(waitTime) * time.Millisecond)
|
|
||||||
err = c.eventHijack(atomic.LoadInt64(eventState.lastSeen), eventState.C, eventState.errC)
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) noListeners() bool {
|
|
||||||
eventState.RLock()
|
|
||||||
defer eventState.RUnlock()
|
|
||||||
return len(eventState.listeners) == 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) isEnabled() bool {
|
|
||||||
eventState.RLock()
|
|
||||||
defer eventState.RUnlock()
|
|
||||||
return eventState.enabled
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) sendEvent(event *APIEvents) {
|
|
||||||
eventState.RLock()
|
|
||||||
defer eventState.RUnlock()
|
|
||||||
eventState.Add(1)
|
|
||||||
defer eventState.Done()
|
|
||||||
if eventState.enabled {
|
|
||||||
if len(eventState.listeners) == 0 {
|
|
||||||
eventState.errC <- ErrNoListeners
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, listener := range eventState.listeners {
|
|
||||||
listener <- event
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (eventState *eventMonitoringState) updateLastSeen(e *APIEvents) {
|
|
||||||
eventState.Lock()
|
|
||||||
defer eventState.Unlock()
|
|
||||||
if atomic.LoadInt64(eventState.lastSeen) < e.Time {
|
|
||||||
atomic.StoreInt64(eventState.lastSeen, e.Time)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) eventHijack(startTime int64, eventChan chan *APIEvents, errChan chan error) error {
|
|
||||||
uri := "/events"
|
|
||||||
if startTime != 0 {
|
|
||||||
uri += fmt.Sprintf("?since=%d", startTime)
|
|
||||||
}
|
|
||||||
protocol := c.endpointURL.Scheme
|
|
||||||
address := c.endpointURL.Path
|
|
||||||
if protocol != "unix" {
|
|
||||||
protocol = "tcp"
|
|
||||||
address = c.endpointURL.Host
|
|
||||||
}
|
|
||||||
var dial net.Conn
|
|
||||||
var err error
|
|
||||||
if c.TLSConfig == nil {
|
|
||||||
dial, err = c.Dialer.Dial(protocol, address)
|
|
||||||
} else {
|
|
||||||
dial, err = tlsDialWithDialer(c.Dialer, protocol, address, c.TLSConfig)
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
conn := httputil.NewClientConn(dial, nil)
|
|
||||||
req, err := http.NewRequest("GET", uri, nil)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
res, err := conn.Do(req)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
go func(res *http.Response, conn *httputil.ClientConn) {
|
|
||||||
defer conn.Close()
|
|
||||||
defer res.Body.Close()
|
|
||||||
decoder := json.NewDecoder(res.Body)
|
|
||||||
for {
|
|
||||||
var event APIEvents
|
|
||||||
if err = decoder.Decode(&event); err != nil {
|
|
||||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
|
||||||
if c.eventMonitor.isEnabled() {
|
|
||||||
// Signal that we're exiting.
|
|
||||||
eventChan <- EOFEvent
|
|
||||||
}
|
|
||||||
break
|
|
||||||
}
|
|
||||||
errChan <- err
|
|
||||||
}
|
|
||||||
if event.Time == 0 {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if !c.eventMonitor.isEnabled() {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
transformEvent(&event)
|
|
||||||
eventChan <- &event
|
|
||||||
}
|
|
||||||
}(res, conn)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// transformEvent takes an event and determines what version it is from
|
|
||||||
// then populates both versions of the event
|
|
||||||
func transformEvent(event *APIEvents) {
|
|
||||||
// if event version is <= 1.21 there will be no Action and no Type
|
|
||||||
if event.Action == "" && event.Type == "" {
|
|
||||||
event.Action = event.Status
|
|
||||||
event.Actor.ID = event.ID
|
|
||||||
event.Actor.Attributes = map[string]string{}
|
|
||||||
switch event.Status {
|
|
||||||
case "delete", "import", "pull", "push", "tag", "untag":
|
|
||||||
event.Type = "image"
|
|
||||||
default:
|
|
||||||
event.Type = "container"
|
|
||||||
if event.From != "" {
|
|
||||||
event.Actor.Attributes["image"] = event.From
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if event.Status == "" {
|
|
||||||
if event.Type == "image" || event.Type == "container" {
|
|
||||||
event.Status = event.Action
|
|
||||||
} else {
|
|
||||||
// Because just the Status has been overloaded with different Types
|
|
||||||
// if an event is not for an image or a container, we prepend the type
|
|
||||||
// to avoid problems for people relying on actions being only for
|
|
||||||
// images and containers
|
|
||||||
event.Status = event.Type + ":" + event.Action
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if event.ID == "" {
|
|
||||||
event.ID = event.Actor.ID
|
|
||||||
}
|
|
||||||
if event.From == "" {
|
|
||||||
event.From = event.Actor.Attributes["image"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
202
vendor/github.com/fsouza/go-dockerclient/exec.go
generated
vendored
202
vendor/github.com/fsouza/go-dockerclient/exec.go
generated
vendored
@@ -1,202 +0,0 @@
|
|||||||
// Copyright 2015 go-dockerclient authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package docker
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"net/url"
|
|
||||||
"strconv"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Exec is the type representing a `docker exec` instance and containing the
|
|
||||||
// instance ID
|
|
||||||
type Exec struct {
|
|
||||||
ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateExecOptions specify parameters to the CreateExecContainer function.
|
|
||||||
//
|
|
||||||
// See https://goo.gl/1KSIb7 for more details
|
|
||||||
type CreateExecOptions struct {
|
|
||||||
AttachStdin bool `json:"AttachStdin,omitempty" yaml:"AttachStdin,omitempty"`
|
|
||||||
AttachStdout bool `json:"AttachStdout,omitempty" yaml:"AttachStdout,omitempty"`
|
|
||||||
AttachStderr bool `json:"AttachStderr,omitempty" yaml:"AttachStderr,omitempty"`
|
|
||||||
Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
|
|
||||||
Cmd []string `json:"Cmd,omitempty" yaml:"Cmd,omitempty"`
|
|
||||||
Container string `json:"Container,omitempty" yaml:"Container,omitempty"`
|
|
||||||
User string `json:"User,omitempty" yaml:"User,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateExec sets up an exec instance in a running container `id`, returning the exec
|
|
||||||
// instance, or an error in case of failure.
|
|
||||||
//
|
|
||||||
// See https://goo.gl/1KSIb7 for more details
|
|
||||||
func (c *Client) CreateExec(opts CreateExecOptions) (*Exec, error) {
|
|
||||||
path := fmt.Sprintf("/containers/%s/exec", opts.Container)
|
|
||||||
resp, err := c.do("POST", path, doOptions{data: opts})
|
|
||||||
if err != nil {
|
|
||||||
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
|
|
||||||
return nil, &NoSuchContainer{ID: opts.Container}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
var exec Exec
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return &exec, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartExecOptions specify parameters to the StartExecContainer function.
|
|
||||||
//
|
|
||||||
// See https://goo.gl/iQCnto for more details
|
|
||||||
type StartExecOptions struct {
|
|
||||||
Detach bool `json:"Detach,omitempty" yaml:"Detach,omitempty"`
|
|
||||||
|
|
||||||
Tty bool `json:"Tty,omitempty" yaml:"Tty,omitempty"`
|
|
||||||
|
|
||||||
InputStream io.Reader `qs:"-"`
|
|
||||||
OutputStream io.Writer `qs:"-"`
|
|
||||||
ErrorStream io.Writer `qs:"-"`
|
|
||||||
|
|
||||||
// Use raw terminal? Usually true when the container contains a TTY.
|
|
||||||
RawTerminal bool `qs:"-"`
|
|
||||||
|
|
||||||
// If set, after a successful connect, a sentinel will be sent and then the
|
|
||||||
// client will block on receive before continuing.
|
|
||||||
//
|
|
||||||
// It must be an unbuffered channel. Using a buffered channel can lead
|
|
||||||
// to unexpected behavior.
|
|
||||||
Success chan struct{} `json:"-"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartExec starts a previously set up exec instance id. If opts.Detach is
|
|
||||||
// true, it returns after starting the exec command. Otherwise, it sets up an
|
|
||||||
// interactive session with the exec command.
|
|
||||||
//
|
|
||||||
// See https://goo.gl/iQCnto for more details
|
|
||||||
func (c *Client) StartExec(id string, opts StartExecOptions) error {
|
|
||||||
cw, err := c.StartExecNonBlocking(id, opts)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if cw != nil {
|
|
||||||
return cw.Wait()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartExecNonBlocking starts a previously set up exec instance id. If opts.Detach is
|
|
||||||
// true, it returns after starting the exec command. Otherwise, it sets up an
|
|
||||||
// interactive session with the exec command.
|
|
||||||
//
|
|
||||||
// See https://goo.gl/iQCnto for more details
|
|
||||||
func (c *Client) StartExecNonBlocking(id string, opts StartExecOptions) (CloseWaiter, error) {
|
|
||||||
if id == "" {
|
|
||||||
return nil, &NoSuchExec{ID: id}
|
|
||||||
}
|
|
||||||
|
|
||||||
path := fmt.Sprintf("/exec/%s/start", id)
|
|
||||||
|
|
||||||
if opts.Detach {
|
|
||||||
resp, err := c.do("POST", path, doOptions{data: opts})
|
|
||||||
if err != nil {
|
|
||||||
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
|
|
||||||
return nil, &NoSuchExec{ID: id}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
return nil, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
return c.hijack("POST", path, hijackOptions{
|
|
||||||
success: opts.Success,
|
|
||||||
setRawTerminal: opts.RawTerminal,
|
|
||||||
in: opts.InputStream,
|
|
||||||
stdout: opts.OutputStream,
|
|
||||||
stderr: opts.ErrorStream,
|
|
||||||
data: opts,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResizeExecTTY resizes the tty session used by the exec command id. This API
|
|
||||||
// is valid only if Tty was specified as part of creating and starting the exec
|
|
||||||
// command.
|
|
||||||
//
|
|
||||||
// See https://goo.gl/e1JpsA for more details
|
|
||||||
func (c *Client) ResizeExecTTY(id string, height, width int) error {
|
|
||||||
params := make(url.Values)
|
|
||||||
params.Set("h", strconv.Itoa(height))
|
|
||||||
params.Set("w", strconv.Itoa(width))
|
|
||||||
|
|
||||||
path := fmt.Sprintf("/exec/%s/resize?%s", id, params.Encode())
|
|
||||||
resp, err := c.do("POST", path, doOptions{})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
resp.Body.Close()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecProcessConfig is a type describing the command associated to a Exec
|
|
||||||
// instance. It's used in the ExecInspect type.
|
|
||||||
type ExecProcessConfig struct {
|
|
||||||
Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
|
|
||||||
User string `json:"user,omitempty" yaml:"user,omitempty"`
|
|
||||||
Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"`
|
|
||||||
EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
|
|
||||||
Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExecInspect is a type with details about a exec instance, including the
|
|
||||||
// exit code if the command has finished running. It's returned by a api
|
|
||||||
// call to /exec/(id)/json
|
|
||||||
//
|
|
||||||
// See https://goo.gl/gPtX9R for more details
|
|
||||||
type ExecInspect struct {
|
|
||||||
ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
|
|
||||||
Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
|
|
||||||
ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
|
|
||||||
OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
|
|
||||||
OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
|
|
||||||
OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
|
|
||||||
ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
|
|
||||||
Container Container `json:"Container,omitempty" yaml:"Container,omitempty"`
|
|
||||||
}
|
|
||||||
|
|
||||||
// InspectExec returns low-level information about the exec command id.
|
|
||||||
//
|
|
||||||
// See https://goo.gl/gPtX9R for more details
|
|
||||||
func (c *Client) InspectExec(id string) (*ExecInspect, error) {
|
|
||||||
path := fmt.Sprintf("/exec/%s/json", id)
|
|
||||||
resp, err := c.do("GET", path, doOptions{})
|
|
||||||
if err != nil {
|
|
||||||
if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound {
|
|
||||||
return nil, &NoSuchExec{ID: id}
|
|
||||||
}
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer resp.Body.Close()
|
|
||||||
var exec ExecInspect
|
|
||||||
if err := json.NewDecoder(resp.Body).Decode(&exec); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return &exec, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NoSuchExec is the error returned when a given exec instance does not exist.
|
|
||||||
type NoSuchExec struct {
|
|
||||||
ID string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (err *NoSuchExec) Error() string {
|
|
||||||
return "No such exec instance: " + err.ID
|
|
||||||
}
|
|
||||||
@@ -1,55 +0,0 @@
|
|||||||
# 0.9.0 (Unreleased)
|
|
||||||
|
|
||||||
* logrus/text_formatter: don't emit empty msg
|
|
||||||
* logrus/hooks/airbrake: move out of main repository
|
|
||||||
* logrus/hooks/sentry: move out of main repository
|
|
||||||
* logrus/hooks/papertrail: move out of main repository
|
|
||||||
* logrus/hooks/bugsnag: move out of main repository
|
|
||||||
|
|
||||||
# 0.8.7
|
|
||||||
|
|
||||||
* logrus/core: fix possible race (#216)
|
|
||||||
* logrus/doc: small typo fixes and doc improvements
|
|
||||||
|
|
||||||
|
|
||||||
# 0.8.6
|
|
||||||
|
|
||||||
* hooks/raven: allow passing an initialized client
|
|
||||||
|
|
||||||
# 0.8.5
|
|
||||||
|
|
||||||
* logrus/core: revert #208
|
|
||||||
|
|
||||||
# 0.8.4
|
|
||||||
|
|
||||||
* formatter/text: fix data race (#218)
|
|
||||||
|
|
||||||
# 0.8.3
|
|
||||||
|
|
||||||
* logrus/core: fix entry log level (#208)
|
|
||||||
* logrus/core: improve performance of text formatter by 40%
|
|
||||||
* logrus/core: expose `LevelHooks` type
|
|
||||||
* logrus/core: add support for DragonflyBSD and NetBSD
|
|
||||||
* formatter/text: print structs more verbosely
|
|
||||||
|
|
||||||
# 0.8.2
|
|
||||||
|
|
||||||
* logrus: fix more Fatal family functions
|
|
||||||
|
|
||||||
# 0.8.1
|
|
||||||
|
|
||||||
* logrus: fix not exiting on `Fatalf` and `Fatalln`
|
|
||||||
|
|
||||||
# 0.8.0
|
|
||||||
|
|
||||||
* logrus: defaults to stderr instead of stdout
|
|
||||||
* hooks/sentry: add special field for `*http.Request`
|
|
||||||
* formatter/text: ignore Windows for colors
|
|
||||||
|
|
||||||
# 0.7.3
|
|
||||||
|
|
||||||
* formatter/\*: allow configuration of timestamp layout
|
|
||||||
|
|
||||||
# 0.7.2
|
|
||||||
|
|
||||||
* formatter/text: Add configuration option for time format (#158)
|
|
||||||
21
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE
generated
vendored
21
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/LICENSE
generated
vendored
@@ -1,21 +0,0 @@
|
|||||||
The MIT License (MIT)
|
|
||||||
|
|
||||||
Copyright (c) 2014 Simon Eskildsen
|
|
||||||
|
|
||||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
||||||
of this software and associated documentation files (the "Software"), to deal
|
|
||||||
in the Software without restriction, including without limitation the rights
|
|
||||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
||||||
copies of the Software, and to permit persons to whom the Software is
|
|
||||||
furnished to do so, subject to the following conditions:
|
|
||||||
|
|
||||||
The above copyright notice and this permission notice shall be included in
|
|
||||||
all copies or substantial portions of the Software.
|
|
||||||
|
|
||||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
||||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
||||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
||||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
||||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
||||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
|
||||||
THE SOFTWARE.
|
|
||||||
365
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
generated
vendored
365
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md
generated
vendored
@@ -1,365 +0,0 @@
|
|||||||
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/Sirupsen/logrus) [][godoc]
|
|
||||||
|
|
||||||
Logrus is a structured logger for Go (golang), completely API compatible with
|
|
||||||
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
|
|
||||||
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
|
|
||||||
many large deployments. The core API is unlikely to change much but please
|
|
||||||
version control your Logrus to make sure you aren't fetching latest `master` on
|
|
||||||
every build.**
|
|
||||||
|
|
||||||
Nicely color-coded in development (when a TTY is attached, otherwise just
|
|
||||||
plain text):
|
|
||||||
|
|
||||||

|
|
||||||
|
|
||||||
With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
|
|
||||||
or Splunk:
|
|
||||||
|
|
||||||
```json
|
|
||||||
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
|
|
||||||
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
|
|
||||||
|
|
||||||
{"level":"warning","msg":"The group's number increased tremendously!",
|
|
||||||
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}
|
|
||||||
|
|
||||||
{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
|
|
||||||
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}
|
|
||||||
|
|
||||||
{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
|
|
||||||
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}
|
|
||||||
|
|
||||||
{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
|
|
||||||
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
|
|
||||||
```
|
|
||||||
|
|
||||||
With the default `log.Formatter = new(&log.TextFormatter{})` when a TTY is not
|
|
||||||
attached, the output is compatible with the
|
|
||||||
[logfmt](http://godoc.org/github.com/kr/logfmt) format:
|
|
||||||
|
|
||||||
```text
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
|
|
||||||
time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
|
|
||||||
exit status 1
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Example
|
|
||||||
|
|
||||||
The simplest way to use Logrus is simply the package-level exported logger:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"animal": "walrus",
|
|
||||||
}).Info("A walrus appears")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Note that it's completely api-compatible with the stdlib logger, so you can
|
|
||||||
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
|
|
||||||
and you'll now have the flexibility of Logrus. You can customize it all you
|
|
||||||
want:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// Log as JSON instead of the default ASCII formatter.
|
|
||||||
log.SetFormatter(&log.JSONFormatter{})
|
|
||||||
|
|
||||||
// Output to stderr instead of stdout, could also be a file.
|
|
||||||
log.SetOutput(os.Stderr)
|
|
||||||
|
|
||||||
// Only log the warning severity or above.
|
|
||||||
log.SetLevel(log.WarnLevel)
|
|
||||||
}
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"animal": "walrus",
|
|
||||||
"size": 10,
|
|
||||||
}).Info("A group of walrus emerges from the ocean")
|
|
||||||
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"omg": true,
|
|
||||||
"number": 122,
|
|
||||||
}).Warn("The group's number increased tremendously!")
|
|
||||||
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"omg": true,
|
|
||||||
"number": 100,
|
|
||||||
}).Fatal("The ice breaks!")
|
|
||||||
|
|
||||||
// A common pattern is to re-use fields between logging statements by re-using
|
|
||||||
// the logrus.Entry returned from WithFields()
|
|
||||||
contextLogger := log.WithFields(log.Fields{
|
|
||||||
"common": "this is a common field",
|
|
||||||
"other": "I also should be logged always",
|
|
||||||
})
|
|
||||||
|
|
||||||
contextLogger.Info("I'll be logged with common and other field")
|
|
||||||
contextLogger.Info("Me too")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
For more advanced usage such as logging to multiple locations from the same
|
|
||||||
application, you can also create an instance of the `logrus` Logger:
|
|
||||||
|
|
||||||
```go
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Create a new instance of the logger. You can have any number of instances.
|
|
||||||
var log = logrus.New()
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
// The API for setting attributes is a little different than the package level
|
|
||||||
// exported logger. See Godoc.
|
|
||||||
log.Out = os.Stderr
|
|
||||||
|
|
||||||
log.WithFields(logrus.Fields{
|
|
||||||
"animal": "walrus",
|
|
||||||
"size": 10,
|
|
||||||
}).Info("A group of walrus emerges from the ocean")
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Fields
|
|
||||||
|
|
||||||
Logrus encourages careful, structured logging though logging fields instead of
|
|
||||||
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
|
|
||||||
to send event %s to topic %s with key %d")`, you should log the much more
|
|
||||||
discoverable:
|
|
||||||
|
|
||||||
```go
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"event": event,
|
|
||||||
"topic": topic,
|
|
||||||
"key": key,
|
|
||||||
}).Fatal("Failed to send event")
|
|
||||||
```
|
|
||||||
|
|
||||||
We've found this API forces you to think about logging in a way that produces
|
|
||||||
much more useful logging messages. We've been in countless situations where just
|
|
||||||
a single added field to a log statement that was already there would've saved us
|
|
||||||
hours. The `WithFields` call is optional.
|
|
||||||
|
|
||||||
In general, with Logrus using any of the `printf`-family functions should be
|
|
||||||
seen as a hint you should add a field, however, you can still use the
|
|
||||||
`printf`-family functions with Logrus.
|
|
||||||
|
|
||||||
#### Hooks
|
|
||||||
|
|
||||||
You can add hooks for logging levels. For example to send errors to an exception
|
|
||||||
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
|
|
||||||
multiple places simultaneously, e.g. syslog.
|
|
||||||
|
|
||||||
Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
|
|
||||||
`init`:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
"gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
|
|
||||||
logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog"
|
|
||||||
"log/syslog"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
|
|
||||||
// Use the Airbrake hook to report errors that have Error severity or above to
|
|
||||||
// an exception tracker. You can create custom hooks, see the Hooks section.
|
|
||||||
log.AddHook(airbrake.NewHook(123, "xyz", "production"))
|
|
||||||
|
|
||||||
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
|
|
||||||
if err != nil {
|
|
||||||
log.Error("Unable to connect to local syslog daemon")
|
|
||||||
} else {
|
|
||||||
log.AddHook(hook)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
|
|
||||||
|
|
||||||
| Hook | Description |
|
|
||||||
| ----- | ----------- |
|
|
||||||
| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
|
|
||||||
| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
|
|
||||||
| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
|
|
||||||
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
|
|
||||||
| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
|
|
||||||
| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
|
|
||||||
| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
|
|
||||||
| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
|
|
||||||
| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
|
|
||||||
| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
|
|
||||||
| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
|
|
||||||
| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
|
|
||||||
| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
|
|
||||||
| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
|
|
||||||
| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
|
|
||||||
| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
|
|
||||||
| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
|
|
||||||
| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
|
|
||||||
| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
|
|
||||||
| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
|
|
||||||
| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
|
|
||||||
|
|
||||||
#### Level logging
|
|
||||||
|
|
||||||
Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.
|
|
||||||
|
|
||||||
```go
|
|
||||||
log.Debug("Useful debugging information.")
|
|
||||||
log.Info("Something noteworthy happened!")
|
|
||||||
log.Warn("You should probably take a look at this.")
|
|
||||||
log.Error("Something failed but I'm not quitting.")
|
|
||||||
// Calls os.Exit(1) after logging
|
|
||||||
log.Fatal("Bye.")
|
|
||||||
// Calls panic() after logging
|
|
||||||
log.Panic("I'm bailing.")
|
|
||||||
```
|
|
||||||
|
|
||||||
You can set the logging level on a `Logger`, then it will only log entries with
|
|
||||||
that severity or anything above it:
|
|
||||||
|
|
||||||
```go
|
|
||||||
// Will log anything that is info or above (warn, error, fatal, panic). Default.
|
|
||||||
log.SetLevel(log.InfoLevel)
|
|
||||||
```
|
|
||||||
|
|
||||||
It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
|
|
||||||
environment if your application has that.
|
|
||||||
|
|
||||||
#### Entries
|
|
||||||
|
|
||||||
Besides the fields added with `WithField` or `WithFields` some fields are
|
|
||||||
automatically added to all logging events:
|
|
||||||
|
|
||||||
1. `time`. The timestamp when the entry was created.
|
|
||||||
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
|
|
||||||
the `AddFields` call. E.g. `Failed to send event.`
|
|
||||||
3. `level`. The logging level. E.g. `info`.
|
|
||||||
|
|
||||||
#### Environments
|
|
||||||
|
|
||||||
Logrus has no notion of environment.
|
|
||||||
|
|
||||||
If you wish for hooks and formatters to only be used in specific environments,
|
|
||||||
you should handle that yourself. For example, if your application has a global
|
|
||||||
variable `Environment`, which is a string representation of the environment you
|
|
||||||
could do:
|
|
||||||
|
|
||||||
```go
|
|
||||||
import (
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
init() {
|
|
||||||
// do something here to set environment depending on an environment variable
|
|
||||||
// or command-line flag
|
|
||||||
if Environment == "production" {
|
|
||||||
log.SetFormatter(&log.JSONFormatter{})
|
|
||||||
} else {
|
|
||||||
// The TextFormatter is default, you don't actually have to do this.
|
|
||||||
log.SetFormatter(&log.TextFormatter{})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
This configuration is how `logrus` was intended to be used, but JSON in
|
|
||||||
production is mostly only useful if you do log aggregation with tools like
|
|
||||||
Splunk or Logstash.
|
|
||||||
|
|
||||||
#### Formatters
|
|
||||||
|
|
||||||
The built-in logging formatters are:
|
|
||||||
|
|
||||||
* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
|
|
||||||
without colors.
|
|
||||||
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
|
|
||||||
field to `true`. To force no colored output even if there is a TTY set the
|
|
||||||
`DisableColors` field to `true`
|
|
||||||
* `logrus.JSONFormatter`. Logs fields as JSON.
|
|
||||||
* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events.
|
|
||||||
|
|
||||||
```go
|
|
||||||
logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"})
|
|
||||||
```
|
|
||||||
|
|
||||||
Third party logging formatters:
|
|
||||||
|
|
||||||
* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
|
|
||||||
* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
|
|
||||||
|
|
||||||
You can define your formatter by implementing the `Formatter` interface,
|
|
||||||
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
|
|
||||||
`Fields` type (`map[string]interface{}`) with all your fields as well as the
|
|
||||||
default ones (see Entries section above):
|
|
||||||
|
|
||||||
```go
|
|
||||||
type MyJSONFormatter struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
log.SetFormatter(new(MyJSONFormatter))
|
|
||||||
|
|
||||||
func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
|
|
||||||
// Note this doesn't include Time, Level and Message which are available on
|
|
||||||
// the Entry. Consult `godoc` on information about those fields or read the
|
|
||||||
// source of the official loggers.
|
|
||||||
serialized, err := json.Marshal(entry.Data)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
|
||||||
}
|
|
||||||
return append(serialized, '\n'), nil
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
#### Logger as an `io.Writer`
|
|
||||||
|
|
||||||
Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
|
|
||||||
|
|
||||||
```go
|
|
||||||
w := logger.Writer()
|
|
||||||
defer w.Close()
|
|
||||||
|
|
||||||
srv := http.Server{
|
|
||||||
// create a stdlib log.Logger that writes to
|
|
||||||
// logrus.Logger.
|
|
||||||
ErrorLog: log.New(w, "", 0),
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
Each line written to that writer will be printed the usual way, using formatters
|
|
||||||
and hooks. The level for those entries is `info`.
|
|
||||||
|
|
||||||
#### Rotation
|
|
||||||
|
|
||||||
Log rotation is not provided with Logrus. Log rotation should be done by an
|
|
||||||
external program (like `logrotate(8)`) that can compress and delete old log
|
|
||||||
entries. It should not be a feature of the application-level logger.
|
|
||||||
|
|
||||||
#### Tools
|
|
||||||
|
|
||||||
| Tool | Description |
|
|
||||||
| ---- | ----------- |
|
|
||||||
|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
|
|
||||||
|
|
||||||
[godoc]: https://godoc.org/github.com/Sirupsen/logrus
|
|
||||||
26
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go
generated
vendored
26
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/doc.go
generated
vendored
@@ -1,26 +0,0 @@
|
|||||||
/*
|
|
||||||
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
|
|
||||||
|
|
||||||
|
|
||||||
The simplest way to use Logrus is simply the package-level exported logger:
|
|
||||||
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
log "github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
log.WithFields(log.Fields{
|
|
||||||
"animal": "walrus",
|
|
||||||
"number": 1,
|
|
||||||
"size": 10,
|
|
||||||
}).Info("A walrus appears")
|
|
||||||
}
|
|
||||||
|
|
||||||
Output:
|
|
||||||
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
|
|
||||||
|
|
||||||
For a full guide visit https://github.com/Sirupsen/logrus
|
|
||||||
*/
|
|
||||||
package logrus
|
|
||||||
264
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
generated
vendored
264
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go
generated
vendored
@@ -1,264 +0,0 @@
|
|||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Defines the key when adding errors using WithError.
|
|
||||||
var ErrorKey = "error"
|
|
||||||
|
|
||||||
// An entry is the final or intermediate Logrus logging entry. It contains all
|
|
||||||
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
|
|
||||||
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
|
|
||||||
// passed around as much as you wish to avoid field duplication.
|
|
||||||
type Entry struct {
|
|
||||||
Logger *Logger
|
|
||||||
|
|
||||||
// Contains all the fields set by the user.
|
|
||||||
Data Fields
|
|
||||||
|
|
||||||
// Time at which the log entry was created
|
|
||||||
Time time.Time
|
|
||||||
|
|
||||||
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
|
|
||||||
Level Level
|
|
||||||
|
|
||||||
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
|
|
||||||
Message string
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewEntry(logger *Logger) *Entry {
|
|
||||||
return &Entry{
|
|
||||||
Logger: logger,
|
|
||||||
// Default is three fields, give a little extra room
|
|
||||||
Data: make(Fields, 5),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns a reader for the entry, which is a proxy to the formatter.
|
|
||||||
func (entry *Entry) Reader() (*bytes.Buffer, error) {
|
|
||||||
serialized, err := entry.Logger.Formatter.Format(entry)
|
|
||||||
return bytes.NewBuffer(serialized), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Returns the string representation from the reader and ultimately the
|
|
||||||
// formatter.
|
|
||||||
func (entry *Entry) String() (string, error) {
|
|
||||||
reader, err := entry.Reader()
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return reader.String(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
|
|
||||||
func (entry *Entry) WithError(err error) *Entry {
|
|
||||||
return entry.WithField(ErrorKey, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add a single field to the Entry.
|
|
||||||
func (entry *Entry) WithField(key string, value interface{}) *Entry {
|
|
||||||
return entry.WithFields(Fields{key: value})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add a map of fields to the Entry.
|
|
||||||
func (entry *Entry) WithFields(fields Fields) *Entry {
|
|
||||||
data := Fields{}
|
|
||||||
for k, v := range entry.Data {
|
|
||||||
data[k] = v
|
|
||||||
}
|
|
||||||
for k, v := range fields {
|
|
||||||
data[k] = v
|
|
||||||
}
|
|
||||||
return &Entry{Logger: entry.Logger, Data: data}
|
|
||||||
}
|
|
||||||
|
|
||||||
// This function is not declared with a pointer value because otherwise
|
|
||||||
// race conditions will occur when using multiple goroutines
|
|
||||||
func (entry Entry) log(level Level, msg string) {
|
|
||||||
entry.Time = time.Now()
|
|
||||||
entry.Level = level
|
|
||||||
entry.Message = msg
|
|
||||||
|
|
||||||
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
|
|
||||||
entry.Logger.mu.Lock()
|
|
||||||
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
|
|
||||||
entry.Logger.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
reader, err := entry.Reader()
|
|
||||||
if err != nil {
|
|
||||||
entry.Logger.mu.Lock()
|
|
||||||
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
|
|
||||||
entry.Logger.mu.Unlock()
|
|
||||||
}
|
|
||||||
|
|
||||||
entry.Logger.mu.Lock()
|
|
||||||
defer entry.Logger.mu.Unlock()
|
|
||||||
|
|
||||||
_, err = io.Copy(entry.Logger.Out, reader)
|
|
||||||
if err != nil {
|
|
||||||
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// To avoid Entry#log() returning a value that only would make sense for
|
|
||||||
// panic() to use in Entry#Panic(), we avoid the allocation by checking
|
|
||||||
// directly here.
|
|
||||||
if level <= PanicLevel {
|
|
||||||
panic(&entry)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Debug(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= DebugLevel {
|
|
||||||
entry.log(DebugLevel, fmt.Sprint(args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Print(args ...interface{}) {
|
|
||||||
entry.Info(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Info(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= InfoLevel {
|
|
||||||
entry.log(InfoLevel, fmt.Sprint(args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Warn(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= WarnLevel {
|
|
||||||
entry.log(WarnLevel, fmt.Sprint(args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Warning(args ...interface{}) {
|
|
||||||
entry.Warn(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Error(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= ErrorLevel {
|
|
||||||
entry.log(ErrorLevel, fmt.Sprint(args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Fatal(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= FatalLevel {
|
|
||||||
entry.log(FatalLevel, fmt.Sprint(args...))
|
|
||||||
}
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Panic(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= PanicLevel {
|
|
||||||
entry.log(PanicLevel, fmt.Sprint(args...))
|
|
||||||
}
|
|
||||||
panic(fmt.Sprint(args...))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Entry Printf family functions
|
|
||||||
|
|
||||||
func (entry *Entry) Debugf(format string, args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= DebugLevel {
|
|
||||||
entry.Debug(fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Infof(format string, args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= InfoLevel {
|
|
||||||
entry.Info(fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Printf(format string, args ...interface{}) {
|
|
||||||
entry.Infof(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Warnf(format string, args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= WarnLevel {
|
|
||||||
entry.Warn(fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Warningf(format string, args ...interface{}) {
|
|
||||||
entry.Warnf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Errorf(format string, args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= ErrorLevel {
|
|
||||||
entry.Error(fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Fatalf(format string, args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= FatalLevel {
|
|
||||||
entry.Fatal(fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Panicf(format string, args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= PanicLevel {
|
|
||||||
entry.Panic(fmt.Sprintf(format, args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Entry Println family functions
|
|
||||||
|
|
||||||
func (entry *Entry) Debugln(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= DebugLevel {
|
|
||||||
entry.Debug(entry.sprintlnn(args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Infoln(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= InfoLevel {
|
|
||||||
entry.Info(entry.sprintlnn(args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Println(args ...interface{}) {
|
|
||||||
entry.Infoln(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Warnln(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= WarnLevel {
|
|
||||||
entry.Warn(entry.sprintlnn(args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Warningln(args ...interface{}) {
|
|
||||||
entry.Warnln(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Errorln(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= ErrorLevel {
|
|
||||||
entry.Error(entry.sprintlnn(args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Fatalln(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= FatalLevel {
|
|
||||||
entry.Fatal(entry.sprintlnn(args...))
|
|
||||||
}
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (entry *Entry) Panicln(args ...interface{}) {
|
|
||||||
if entry.Logger.Level >= PanicLevel {
|
|
||||||
entry.Panic(entry.sprintlnn(args...))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sprintlnn => Sprint no newline. This is to get the behavior of how
|
|
||||||
// fmt.Sprintln where spaces are always added between operands, regardless of
|
|
||||||
// their type. Instead of vendoring the Sprintln implementation to spare a
|
|
||||||
// string allocation, we do the simplest thing.
|
|
||||||
func (entry *Entry) sprintlnn(args ...interface{}) string {
|
|
||||||
msg := fmt.Sprintln(args...)
|
|
||||||
return msg[:len(msg)-1]
|
|
||||||
}
|
|
||||||
193
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go
generated
vendored
193
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go
generated
vendored
@@ -1,193 +0,0 @@
|
|||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// std is the name of the standard logger in stdlib `log`
|
|
||||||
std = New()
|
|
||||||
)
|
|
||||||
|
|
||||||
func StandardLogger() *Logger {
|
|
||||||
return std
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetOutput sets the standard logger output.
|
|
||||||
func SetOutput(out io.Writer) {
|
|
||||||
std.mu.Lock()
|
|
||||||
defer std.mu.Unlock()
|
|
||||||
std.Out = out
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetFormatter sets the standard logger formatter.
|
|
||||||
func SetFormatter(formatter Formatter) {
|
|
||||||
std.mu.Lock()
|
|
||||||
defer std.mu.Unlock()
|
|
||||||
std.Formatter = formatter
|
|
||||||
}
|
|
||||||
|
|
||||||
// SetLevel sets the standard logger level.
|
|
||||||
func SetLevel(level Level) {
|
|
||||||
std.mu.Lock()
|
|
||||||
defer std.mu.Unlock()
|
|
||||||
std.Level = level
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetLevel returns the standard logger level.
|
|
||||||
func GetLevel() Level {
|
|
||||||
std.mu.Lock()
|
|
||||||
defer std.mu.Unlock()
|
|
||||||
return std.Level
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddHook adds a hook to the standard logger hooks.
|
|
||||||
func AddHook(hook Hook) {
|
|
||||||
std.mu.Lock()
|
|
||||||
defer std.mu.Unlock()
|
|
||||||
std.Hooks.Add(hook)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
|
|
||||||
func WithError(err error) *Entry {
|
|
||||||
return std.WithField(ErrorKey, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithField creates an entry from the standard logger and adds a field to
|
|
||||||
// it. If you want multiple fields, use `WithFields`.
|
|
||||||
//
|
|
||||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
|
||||||
// or Panic on the Entry it returns.
|
|
||||||
func WithField(key string, value interface{}) *Entry {
|
|
||||||
return std.WithField(key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// WithFields creates an entry from the standard logger and adds multiple
|
|
||||||
// fields to it. This is simply a helper for `WithField`, invoking it
|
|
||||||
// once for each field.
|
|
||||||
//
|
|
||||||
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
|
|
||||||
// or Panic on the Entry it returns.
|
|
||||||
func WithFields(fields Fields) *Entry {
|
|
||||||
return std.WithFields(fields)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debug logs a message at level Debug on the standard logger.
|
|
||||||
func Debug(args ...interface{}) {
|
|
||||||
std.Debug(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Print logs a message at level Info on the standard logger.
|
|
||||||
func Print(args ...interface{}) {
|
|
||||||
std.Print(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Info logs a message at level Info on the standard logger.
|
|
||||||
func Info(args ...interface{}) {
|
|
||||||
std.Info(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warn logs a message at level Warn on the standard logger.
|
|
||||||
func Warn(args ...interface{}) {
|
|
||||||
std.Warn(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warning logs a message at level Warn on the standard logger.
|
|
||||||
func Warning(args ...interface{}) {
|
|
||||||
std.Warning(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Error logs a message at level Error on the standard logger.
|
|
||||||
func Error(args ...interface{}) {
|
|
||||||
std.Error(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panic logs a message at level Panic on the standard logger.
|
|
||||||
func Panic(args ...interface{}) {
|
|
||||||
std.Panic(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatal logs a message at level Fatal on the standard logger.
|
|
||||||
func Fatal(args ...interface{}) {
|
|
||||||
std.Fatal(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debugf logs a message at level Debug on the standard logger.
|
|
||||||
func Debugf(format string, args ...interface{}) {
|
|
||||||
std.Debugf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Printf logs a message at level Info on the standard logger.
|
|
||||||
func Printf(format string, args ...interface{}) {
|
|
||||||
std.Printf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Infof logs a message at level Info on the standard logger.
|
|
||||||
func Infof(format string, args ...interface{}) {
|
|
||||||
std.Infof(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warnf logs a message at level Warn on the standard logger.
|
|
||||||
func Warnf(format string, args ...interface{}) {
|
|
||||||
std.Warnf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warningf logs a message at level Warn on the standard logger.
|
|
||||||
func Warningf(format string, args ...interface{}) {
|
|
||||||
std.Warningf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Errorf logs a message at level Error on the standard logger.
|
|
||||||
func Errorf(format string, args ...interface{}) {
|
|
||||||
std.Errorf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panicf logs a message at level Panic on the standard logger.
|
|
||||||
func Panicf(format string, args ...interface{}) {
|
|
||||||
std.Panicf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatalf logs a message at level Fatal on the standard logger.
|
|
||||||
func Fatalf(format string, args ...interface{}) {
|
|
||||||
std.Fatalf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Debugln logs a message at level Debug on the standard logger.
|
|
||||||
func Debugln(args ...interface{}) {
|
|
||||||
std.Debugln(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Println logs a message at level Info on the standard logger.
|
|
||||||
func Println(args ...interface{}) {
|
|
||||||
std.Println(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Infoln logs a message at level Info on the standard logger.
|
|
||||||
func Infoln(args ...interface{}) {
|
|
||||||
std.Infoln(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warnln logs a message at level Warn on the standard logger.
|
|
||||||
func Warnln(args ...interface{}) {
|
|
||||||
std.Warnln(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Warningln logs a message at level Warn on the standard logger.
|
|
||||||
func Warningln(args ...interface{}) {
|
|
||||||
std.Warningln(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Errorln logs a message at level Error on the standard logger.
|
|
||||||
func Errorln(args ...interface{}) {
|
|
||||||
std.Errorln(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Panicln logs a message at level Panic on the standard logger.
|
|
||||||
func Panicln(args ...interface{}) {
|
|
||||||
std.Panicln(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fatalln logs a message at level Fatal on the standard logger.
|
|
||||||
func Fatalln(args ...interface{}) {
|
|
||||||
std.Fatalln(args...)
|
|
||||||
}
|
|
||||||
@@ -1,48 +0,0 @@
|
|||||||
package logrus
|
|
||||||
|
|
||||||
import "time"
|
|
||||||
|
|
||||||
const DefaultTimestampFormat = time.RFC3339
|
|
||||||
|
|
||||||
// The Formatter interface is used to implement a custom Formatter. It takes an
|
|
||||||
// `Entry`. It exposes all the fields, including the default ones:
|
|
||||||
//
|
|
||||||
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
|
|
||||||
// * `entry.Data["time"]`. The timestamp.
|
|
||||||
// * `entry.Data["level"]. The level the entry was logged at.
|
|
||||||
//
|
|
||||||
// Any additional fields added with `WithField` or `WithFields` are also in
|
|
||||||
// `entry.Data`. Format is expected to return an array of bytes which are then
|
|
||||||
// logged to `logger.Out`.
|
|
||||||
type Formatter interface {
|
|
||||||
Format(*Entry) ([]byte, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is to not silently overwrite `time`, `msg` and `level` fields when
|
|
||||||
// dumping it. If this code wasn't there doing:
|
|
||||||
//
|
|
||||||
// logrus.WithField("level", 1).Info("hello")
|
|
||||||
//
|
|
||||||
// Would just silently drop the user provided level. Instead with this code
|
|
||||||
// it'll logged as:
|
|
||||||
//
|
|
||||||
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
|
|
||||||
//
|
|
||||||
// It's not exported because it's still using Data in an opinionated way. It's to
|
|
||||||
// avoid code duplication between the two default formatters.
|
|
||||||
func prefixFieldClashes(data Fields) {
|
|
||||||
_, ok := data["time"]
|
|
||||||
if ok {
|
|
||||||
data["fields.time"] = data["time"]
|
|
||||||
}
|
|
||||||
|
|
||||||
_, ok = data["msg"]
|
|
||||||
if ok {
|
|
||||||
data["fields.msg"] = data["msg"]
|
|
||||||
}
|
|
||||||
|
|
||||||
_, ok = data["level"]
|
|
||||||
if ok {
|
|
||||||
data["fields.level"] = data["level"]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
34
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go
generated
vendored
34
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/hooks.go
generated
vendored
@@ -1,34 +0,0 @@
|
|||||||
package logrus
|
|
||||||
|
|
||||||
// A hook to be fired when logging on the logging levels returned from
|
|
||||||
// `Levels()` on your implementation of the interface. Note that this is not
|
|
||||||
// fired in a goroutine or a channel with workers, you should handle such
|
|
||||||
// functionality yourself if your call is non-blocking and you don't wish for
|
|
||||||
// the logging calls for levels returned from `Levels()` to block.
|
|
||||||
type Hook interface {
|
|
||||||
Levels() []Level
|
|
||||||
Fire(*Entry) error
|
|
||||||
}
|
|
||||||
|
|
||||||
// Internal type for storing the hooks on a logger instance.
|
|
||||||
type LevelHooks map[Level][]Hook
|
|
||||||
|
|
||||||
// Add a hook to an instance of logger. This is called with
|
|
||||||
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
|
|
||||||
func (hooks LevelHooks) Add(hook Hook) {
|
|
||||||
for _, level := range hook.Levels() {
|
|
||||||
hooks[level] = append(hooks[level], hook)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fire all the hooks for the passed level. Used by `entry.log` to fire
|
|
||||||
// appropriate hooks for a log entry.
|
|
||||||
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
|
|
||||||
for _, hook := range hooks[level] {
|
|
||||||
if err := hook.Fire(entry); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,41 +0,0 @@
|
|||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"encoding/json"
|
|
||||||
"fmt"
|
|
||||||
)
|
|
||||||
|
|
||||||
type JSONFormatter struct {
|
|
||||||
// TimestampFormat sets the format used for marshaling timestamps.
|
|
||||||
TimestampFormat string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
|
||||||
data := make(Fields, len(entry.Data)+3)
|
|
||||||
for k, v := range entry.Data {
|
|
||||||
switch v := v.(type) {
|
|
||||||
case error:
|
|
||||||
// Otherwise errors are ignored by `encoding/json`
|
|
||||||
// https://github.com/Sirupsen/logrus/issues/137
|
|
||||||
data[k] = v.Error()
|
|
||||||
default:
|
|
||||||
data[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
prefixFieldClashes(data)
|
|
||||||
|
|
||||||
timestampFormat := f.TimestampFormat
|
|
||||||
if timestampFormat == "" {
|
|
||||||
timestampFormat = DefaultTimestampFormat
|
|
||||||
}
|
|
||||||
|
|
||||||
data["time"] = entry.Time.Format(timestampFormat)
|
|
||||||
data["msg"] = entry.Message
|
|
||||||
data["level"] = entry.Level.String()
|
|
||||||
|
|
||||||
serialized, err := json.Marshal(data)
|
|
||||||
if err != nil {
|
|
||||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
|
||||||
}
|
|
||||||
return append(serialized, '\n'), nil
|
|
||||||
}
|
|
||||||
212
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
generated
vendored
212
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go
generated
vendored
@@ -1,212 +0,0 @@
|
|||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
type Logger struct {
|
|
||||||
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
|
|
||||||
// file, or leave it default which is `os.Stderr`. You can also set this to
|
|
||||||
// something more adventorous, such as logging to Kafka.
|
|
||||||
Out io.Writer
|
|
||||||
// Hooks for the logger instance. These allow firing events based on logging
|
|
||||||
// levels and log entries. For example, to send errors to an error tracking
|
|
||||||
// service, log to StatsD or dump the core on fatal errors.
|
|
||||||
Hooks LevelHooks
|
|
||||||
// All log entries pass through the formatter before logged to Out. The
|
|
||||||
// included formatters are `TextFormatter` and `JSONFormatter` for which
|
|
||||||
// TextFormatter is the default. In development (when a TTY is attached) it
|
|
||||||
// logs with colors, but to a file it wouldn't. You can easily implement your
|
|
||||||
// own that implements the `Formatter` interface, see the `README` or included
|
|
||||||
// formatters for examples.
|
|
||||||
Formatter Formatter
|
|
||||||
// The logging level the logger should log at. This is typically (and defaults
|
|
||||||
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
|
|
||||||
// logged. `logrus.Debug` is useful in
|
|
||||||
Level Level
|
|
||||||
// Used to sync writing to the log.
|
|
||||||
mu sync.Mutex
|
|
||||||
}
|
|
||||||
|
|
||||||
// Creates a new logger. Configuration should be set by changing `Formatter`,
|
|
||||||
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
|
||||||
// instantiate your own:
|
|
||||||
//
|
|
||||||
// var log = &Logger{
|
|
||||||
// Out: os.Stderr,
|
|
||||||
// Formatter: new(JSONFormatter),
|
|
||||||
// Hooks: make(LevelHooks),
|
|
||||||
// Level: logrus.DebugLevel,
|
|
||||||
// }
|
|
||||||
//
|
|
||||||
// It's recommended to make this a global instance called `log`.
|
|
||||||
func New() *Logger {
|
|
||||||
return &Logger{
|
|
||||||
Out: os.Stderr,
|
|
||||||
Formatter: new(TextFormatter),
|
|
||||||
Hooks: make(LevelHooks),
|
|
||||||
Level: InfoLevel,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Adds a field to the log entry, note that you it doesn't log until you call
|
|
||||||
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
|
|
||||||
// If you want multiple fields, use `WithFields`.
|
|
||||||
func (logger *Logger) WithField(key string, value interface{}) *Entry {
|
|
||||||
return NewEntry(logger).WithField(key, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Adds a struct of fields to the log entry. All it does is call `WithField` for
|
|
||||||
// each `Field`.
|
|
||||||
func (logger *Logger) WithFields(fields Fields) *Entry {
|
|
||||||
return NewEntry(logger).WithFields(fields)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add an error as single field to the log entry. All it does is call
|
|
||||||
// `WithError` for the given `error`.
|
|
||||||
func (logger *Logger) WithError(err error) *Entry {
|
|
||||||
return NewEntry(logger).WithError(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
|
||||||
if logger.Level >= DebugLevel {
|
|
||||||
NewEntry(logger).Debugf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
|
||||||
if logger.Level >= InfoLevel {
|
|
||||||
NewEntry(logger).Infof(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Printf(format string, args ...interface{}) {
|
|
||||||
NewEntry(logger).Printf(format, args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
|
||||||
if logger.Level >= WarnLevel {
|
|
||||||
NewEntry(logger).Warnf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
|
||||||
if logger.Level >= WarnLevel {
|
|
||||||
NewEntry(logger).Warnf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
|
||||||
if logger.Level >= ErrorLevel {
|
|
||||||
NewEntry(logger).Errorf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
|
||||||
if logger.Level >= FatalLevel {
|
|
||||||
NewEntry(logger).Fatalf(format, args...)
|
|
||||||
}
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
|
||||||
if logger.Level >= PanicLevel {
|
|
||||||
NewEntry(logger).Panicf(format, args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Debug(args ...interface{}) {
|
|
||||||
if logger.Level >= DebugLevel {
|
|
||||||
NewEntry(logger).Debug(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Info(args ...interface{}) {
|
|
||||||
if logger.Level >= InfoLevel {
|
|
||||||
NewEntry(logger).Info(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Print(args ...interface{}) {
|
|
||||||
NewEntry(logger).Info(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Warn(args ...interface{}) {
|
|
||||||
if logger.Level >= WarnLevel {
|
|
||||||
NewEntry(logger).Warn(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Warning(args ...interface{}) {
|
|
||||||
if logger.Level >= WarnLevel {
|
|
||||||
NewEntry(logger).Warn(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Error(args ...interface{}) {
|
|
||||||
if logger.Level >= ErrorLevel {
|
|
||||||
NewEntry(logger).Error(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Fatal(args ...interface{}) {
|
|
||||||
if logger.Level >= FatalLevel {
|
|
||||||
NewEntry(logger).Fatal(args...)
|
|
||||||
}
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Panic(args ...interface{}) {
|
|
||||||
if logger.Level >= PanicLevel {
|
|
||||||
NewEntry(logger).Panic(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Debugln(args ...interface{}) {
|
|
||||||
if logger.Level >= DebugLevel {
|
|
||||||
NewEntry(logger).Debugln(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Infoln(args ...interface{}) {
|
|
||||||
if logger.Level >= InfoLevel {
|
|
||||||
NewEntry(logger).Infoln(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Println(args ...interface{}) {
|
|
||||||
NewEntry(logger).Println(args...)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Warnln(args ...interface{}) {
|
|
||||||
if logger.Level >= WarnLevel {
|
|
||||||
NewEntry(logger).Warnln(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Warningln(args ...interface{}) {
|
|
||||||
if logger.Level >= WarnLevel {
|
|
||||||
NewEntry(logger).Warnln(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Errorln(args ...interface{}) {
|
|
||||||
if logger.Level >= ErrorLevel {
|
|
||||||
NewEntry(logger).Errorln(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Fatalln(args ...interface{}) {
|
|
||||||
if logger.Level >= FatalLevel {
|
|
||||||
NewEntry(logger).Fatalln(args...)
|
|
||||||
}
|
|
||||||
os.Exit(1)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) Panicln(args ...interface{}) {
|
|
||||||
if logger.Level >= PanicLevel {
|
|
||||||
NewEntry(logger).Panicln(args...)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
98
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go
generated
vendored
98
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go
generated
vendored
@@ -1,98 +0,0 @@
|
|||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"log"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Fields type, used to pass to `WithFields`.
|
|
||||||
type Fields map[string]interface{}
|
|
||||||
|
|
||||||
// Level type
|
|
||||||
type Level uint8
|
|
||||||
|
|
||||||
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
|
||||||
func (level Level) String() string {
|
|
||||||
switch level {
|
|
||||||
case DebugLevel:
|
|
||||||
return "debug"
|
|
||||||
case InfoLevel:
|
|
||||||
return "info"
|
|
||||||
case WarnLevel:
|
|
||||||
return "warning"
|
|
||||||
case ErrorLevel:
|
|
||||||
return "error"
|
|
||||||
case FatalLevel:
|
|
||||||
return "fatal"
|
|
||||||
case PanicLevel:
|
|
||||||
return "panic"
|
|
||||||
}
|
|
||||||
|
|
||||||
return "unknown"
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseLevel takes a string level and returns the Logrus log level constant.
|
|
||||||
func ParseLevel(lvl string) (Level, error) {
|
|
||||||
switch lvl {
|
|
||||||
case "panic":
|
|
||||||
return PanicLevel, nil
|
|
||||||
case "fatal":
|
|
||||||
return FatalLevel, nil
|
|
||||||
case "error":
|
|
||||||
return ErrorLevel, nil
|
|
||||||
case "warn", "warning":
|
|
||||||
return WarnLevel, nil
|
|
||||||
case "info":
|
|
||||||
return InfoLevel, nil
|
|
||||||
case "debug":
|
|
||||||
return DebugLevel, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
var l Level
|
|
||||||
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
|
|
||||||
}
|
|
||||||
|
|
||||||
// These are the different logging levels. You can set the logging level to log
|
|
||||||
// on your instance of logger, obtained with `logrus.New()`.
|
|
||||||
const (
|
|
||||||
// PanicLevel level, highest level of severity. Logs and then calls panic with the
|
|
||||||
// message passed to Debug, Info, ...
|
|
||||||
PanicLevel Level = iota
|
|
||||||
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
|
|
||||||
// logging level is set to Panic.
|
|
||||||
FatalLevel
|
|
||||||
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
|
|
||||||
// Commonly used for hooks to send errors to an error tracking service.
|
|
||||||
ErrorLevel
|
|
||||||
// WarnLevel level. Non-critical entries that deserve eyes.
|
|
||||||
WarnLevel
|
|
||||||
// InfoLevel level. General operational entries about what's going on inside the
|
|
||||||
// application.
|
|
||||||
InfoLevel
|
|
||||||
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
|
|
||||||
DebugLevel
|
|
||||||
)
|
|
||||||
|
|
||||||
// Won't compile if StdLogger can't be realized by a log.Logger
|
|
||||||
var (
|
|
||||||
_ StdLogger = &log.Logger{}
|
|
||||||
_ StdLogger = &Entry{}
|
|
||||||
_ StdLogger = &Logger{}
|
|
||||||
)
|
|
||||||
|
|
||||||
// StdLogger is what your logrus-enabled library should take, that way
|
|
||||||
// it'll accept a stdlib logger and a logrus logger. There's no standard
|
|
||||||
// interface, this is the closest we get, unfortunately.
|
|
||||||
type StdLogger interface {
|
|
||||||
Print(...interface{})
|
|
||||||
Printf(string, ...interface{})
|
|
||||||
Println(...interface{})
|
|
||||||
|
|
||||||
Fatal(...interface{})
|
|
||||||
Fatalf(string, ...interface{})
|
|
||||||
Fatalln(...interface{})
|
|
||||||
|
|
||||||
Panic(...interface{})
|
|
||||||
Panicf(string, ...interface{})
|
|
||||||
Panicln(...interface{})
|
|
||||||
}
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
// +build darwin freebsd openbsd netbsd dragonfly
|
|
||||||
|
|
||||||
package logrus
|
|
||||||
|
|
||||||
import "syscall"
|
|
||||||
|
|
||||||
const ioctlReadTermios = syscall.TIOCGETA
|
|
||||||
|
|
||||||
type Termios syscall.Termios
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
// Based on ssh/terminal:
|
|
||||||
// Copyright 2013 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
package logrus
|
|
||||||
|
|
||||||
import "syscall"
|
|
||||||
|
|
||||||
const ioctlReadTermios = syscall.TCGETS
|
|
||||||
|
|
||||||
type Termios syscall.Termios
|
|
||||||
@@ -1,21 +0,0 @@
|
|||||||
// Based on ssh/terminal:
|
|
||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build linux darwin freebsd openbsd netbsd dragonfly
|
|
||||||
|
|
||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
|
||||||
func IsTerminal() bool {
|
|
||||||
fd := syscall.Stderr
|
|
||||||
var termios Termios
|
|
||||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
|
|
||||||
return err == 0
|
|
||||||
}
|
|
||||||
@@ -1,15 +0,0 @@
|
|||||||
// +build solaris
|
|
||||||
|
|
||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsTerminal returns true if the given file descriptor is a terminal.
|
|
||||||
func IsTerminal() bool {
|
|
||||||
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
|
|
||||||
return err == nil
|
|
||||||
}
|
|
||||||
@@ -1,27 +0,0 @@
|
|||||||
// Based on ssh/terminal:
|
|
||||||
// Copyright 2011 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// +build windows
|
|
||||||
|
|
||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
)
|
|
||||||
|
|
||||||
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
|
|
||||||
|
|
||||||
var (
|
|
||||||
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
|
|
||||||
)
|
|
||||||
|
|
||||||
// IsTerminal returns true if stderr's file descriptor is a terminal.
|
|
||||||
func IsTerminal() bool {
|
|
||||||
fd := syscall.Stderr
|
|
||||||
var st uint32
|
|
||||||
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
|
|
||||||
return r != 0 && e == 0
|
|
||||||
}
|
|
||||||
@@ -1,161 +0,0 @@
|
|||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"runtime"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
nocolor = 0
|
|
||||||
red = 31
|
|
||||||
green = 32
|
|
||||||
yellow = 33
|
|
||||||
blue = 34
|
|
||||||
gray = 37
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
baseTimestamp time.Time
|
|
||||||
isTerminal bool
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
baseTimestamp = time.Now()
|
|
||||||
isTerminal = IsTerminal()
|
|
||||||
}
|
|
||||||
|
|
||||||
func miniTS() int {
|
|
||||||
return int(time.Since(baseTimestamp) / time.Second)
|
|
||||||
}
|
|
||||||
|
|
||||||
type TextFormatter struct {
|
|
||||||
// Set to true to bypass checking for a TTY before outputting colors.
|
|
||||||
ForceColors bool
|
|
||||||
|
|
||||||
// Force disabling colors.
|
|
||||||
DisableColors bool
|
|
||||||
|
|
||||||
// Disable timestamp logging. useful when output is redirected to logging
|
|
||||||
// system that already adds timestamps.
|
|
||||||
DisableTimestamp bool
|
|
||||||
|
|
||||||
// Enable logging the full timestamp when a TTY is attached instead of just
|
|
||||||
// the time passed since beginning of execution.
|
|
||||||
FullTimestamp bool
|
|
||||||
|
|
||||||
// TimestampFormat to use for display when a full timestamp is printed
|
|
||||||
TimestampFormat string
|
|
||||||
|
|
||||||
// The fields are sorted by default for a consistent output. For applications
|
|
||||||
// that log extremely frequently and don't use the JSON formatter this may not
|
|
||||||
// be desired.
|
|
||||||
DisableSorting bool
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
|
||||||
var keys []string = make([]string, 0, len(entry.Data))
|
|
||||||
for k := range entry.Data {
|
|
||||||
keys = append(keys, k)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !f.DisableSorting {
|
|
||||||
sort.Strings(keys)
|
|
||||||
}
|
|
||||||
|
|
||||||
b := &bytes.Buffer{}
|
|
||||||
|
|
||||||
prefixFieldClashes(entry.Data)
|
|
||||||
|
|
||||||
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
|
|
||||||
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
|
|
||||||
|
|
||||||
timestampFormat := f.TimestampFormat
|
|
||||||
if timestampFormat == "" {
|
|
||||||
timestampFormat = DefaultTimestampFormat
|
|
||||||
}
|
|
||||||
if isColored {
|
|
||||||
f.printColored(b, entry, keys, timestampFormat)
|
|
||||||
} else {
|
|
||||||
if !f.DisableTimestamp {
|
|
||||||
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
|
|
||||||
}
|
|
||||||
f.appendKeyValue(b, "level", entry.Level.String())
|
|
||||||
if entry.Message != "" {
|
|
||||||
f.appendKeyValue(b, "msg", entry.Message)
|
|
||||||
}
|
|
||||||
for _, key := range keys {
|
|
||||||
f.appendKeyValue(b, key, entry.Data[key])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
b.WriteByte('\n')
|
|
||||||
return b.Bytes(), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
|
|
||||||
var levelColor int
|
|
||||||
switch entry.Level {
|
|
||||||
case DebugLevel:
|
|
||||||
levelColor = gray
|
|
||||||
case WarnLevel:
|
|
||||||
levelColor = yellow
|
|
||||||
case ErrorLevel, FatalLevel, PanicLevel:
|
|
||||||
levelColor = red
|
|
||||||
default:
|
|
||||||
levelColor = blue
|
|
||||||
}
|
|
||||||
|
|
||||||
levelText := strings.ToUpper(entry.Level.String())[0:4]
|
|
||||||
|
|
||||||
if !f.FullTimestamp {
|
|
||||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
|
|
||||||
}
|
|
||||||
for _, k := range keys {
|
|
||||||
v := entry.Data[k]
|
|
||||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func needsQuoting(text string) bool {
|
|
||||||
for _, ch := range text {
|
|
||||||
if !((ch >= 'a' && ch <= 'z') ||
|
|
||||||
(ch >= 'A' && ch <= 'Z') ||
|
|
||||||
(ch >= '0' && ch <= '9') ||
|
|
||||||
ch == '-' || ch == '.') {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
|
|
||||||
|
|
||||||
b.WriteString(key)
|
|
||||||
b.WriteByte('=')
|
|
||||||
|
|
||||||
switch value := value.(type) {
|
|
||||||
case string:
|
|
||||||
if needsQuoting(value) {
|
|
||||||
b.WriteString(value)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(b, "%q", value)
|
|
||||||
}
|
|
||||||
case error:
|
|
||||||
errmsg := value.Error()
|
|
||||||
if needsQuoting(errmsg) {
|
|
||||||
b.WriteString(errmsg)
|
|
||||||
} else {
|
|
||||||
fmt.Fprintf(b, "%q", value)
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
fmt.Fprint(b, value)
|
|
||||||
}
|
|
||||||
|
|
||||||
b.WriteByte(' ')
|
|
||||||
}
|
|
||||||
31
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go
generated
vendored
31
vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/writer.go
generated
vendored
@@ -1,31 +0,0 @@
|
|||||||
package logrus
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"io"
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
func (logger *Logger) Writer() *io.PipeWriter {
|
|
||||||
reader, writer := io.Pipe()
|
|
||||||
|
|
||||||
go logger.writerScanner(reader)
|
|
||||||
runtime.SetFinalizer(writer, writerFinalizer)
|
|
||||||
|
|
||||||
return writer
|
|
||||||
}
|
|
||||||
|
|
||||||
func (logger *Logger) writerScanner(reader *io.PipeReader) {
|
|
||||||
scanner := bufio.NewScanner(reader)
|
|
||||||
for scanner.Scan() {
|
|
||||||
logger.Print(scanner.Text())
|
|
||||||
}
|
|
||||||
if err := scanner.Err(); err != nil {
|
|
||||||
logger.Errorf("Error while reading from Writer: %s", err)
|
|
||||||
}
|
|
||||||
reader.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func writerFinalizer(writer *io.PipeWriter) {
|
|
||||||
writer.Close()
|
|
||||||
}
|
|
||||||
@@ -1,67 +0,0 @@
|
|||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ParseEnvFile reads a file with environment variables enumerated by lines
|
|
||||||
//
|
|
||||||
// ``Environment variable names used by the utilities in the Shell and
|
|
||||||
// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase
|
|
||||||
// letters, digits, and the '_' (underscore) from the characters defined in
|
|
||||||
// Portable Character Set and do not begin with a digit. *But*, other
|
|
||||||
// characters may be permitted by an implementation; applications shall
|
|
||||||
// tolerate the presence of such names.''
|
|
||||||
// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html
|
|
||||||
//
|
|
||||||
// As of #16585, it's up to application inside docker to validate or not
|
|
||||||
// environment variables, that's why we just strip leading whitespace and
|
|
||||||
// nothing more.
|
|
||||||
func ParseEnvFile(filename string) ([]string, error) {
|
|
||||||
fh, err := os.Open(filename)
|
|
||||||
if err != nil {
|
|
||||||
return []string{}, err
|
|
||||||
}
|
|
||||||
defer fh.Close()
|
|
||||||
|
|
||||||
lines := []string{}
|
|
||||||
scanner := bufio.NewScanner(fh)
|
|
||||||
for scanner.Scan() {
|
|
||||||
// trim the line from all leading whitespace first
|
|
||||||
line := strings.TrimLeft(scanner.Text(), whiteSpaces)
|
|
||||||
// line is not empty, and not starting with '#'
|
|
||||||
if len(line) > 0 && !strings.HasPrefix(line, "#") {
|
|
||||||
data := strings.SplitN(line, "=", 2)
|
|
||||||
|
|
||||||
// trim the front of a variable, but nothing else
|
|
||||||
variable := strings.TrimLeft(data[0], whiteSpaces)
|
|
||||||
if strings.ContainsAny(variable, whiteSpaces) {
|
|
||||||
return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)}
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(data) > 1 {
|
|
||||||
|
|
||||||
// pass the value through, no trimming
|
|
||||||
lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1]))
|
|
||||||
} else {
|
|
||||||
// if only a pass-through variable is given, clean it up.
|
|
||||||
lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return lines, scanner.Err()
|
|
||||||
}
|
|
||||||
|
|
||||||
var whiteSpaces = " \t"
|
|
||||||
|
|
||||||
// ErrBadEnvVariable typed error for bad environment variable
|
|
||||||
type ErrBadEnvVariable struct {
|
|
||||||
msg string
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e ErrBadEnvVariable) Error() string {
|
|
||||||
return fmt.Sprintf("poorly formatted environment: %s", e.msg)
|
|
||||||
}
|
|
||||||
146
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go
generated
vendored
146
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go
generated
vendored
@@ -1,146 +0,0 @@
|
|||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"net/url"
|
|
||||||
"runtime"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp://
|
|
||||||
// TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter
|
|
||||||
// is not supplied. A better longer term solution would be to use a named
|
|
||||||
// pipe as the default on the Windows daemon.
|
|
||||||
// These are the IANA registered port numbers for use with Docker
|
|
||||||
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
|
|
||||||
DefaultHTTPPort = 2375 // Default HTTP Port
|
|
||||||
// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
|
|
||||||
DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
|
|
||||||
// DefaultUnixSocket Path for the unix socket.
|
|
||||||
// Docker daemon by default always listens on the default unix socket
|
|
||||||
DefaultUnixSocket = "/var/run/docker.sock"
|
|
||||||
// DefaultTCPHost constant defines the default host string used by docker on Windows
|
|
||||||
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
|
|
||||||
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
|
|
||||||
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
|
|
||||||
)
|
|
||||||
|
|
||||||
// ValidateHost validates that the specified string is a valid host and returns it.
|
|
||||||
func ValidateHost(val string) (string, error) {
|
|
||||||
_, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val)
|
|
||||||
if err != nil {
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
// Note: unlike most flag validators, we don't return the mutated value here
|
|
||||||
// we need to know what the user entered later (using ParseHost) to adjust for tls
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ParseHost and set defaults for a Daemon host string
|
|
||||||
func ParseHost(defaultHost, val string) (string, error) {
|
|
||||||
host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val)
|
|
||||||
if err != nil {
|
|
||||||
return val, err
|
|
||||||
}
|
|
||||||
return host, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
|
|
||||||
// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr
|
|
||||||
// defaultUnixAddr must be a absolute file path (no `unix://` prefix)
|
|
||||||
// defaultTCPAddr must be the full `tcp://host:port` form
|
|
||||||
func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) {
|
|
||||||
addr = strings.TrimSpace(addr)
|
|
||||||
if addr == "" {
|
|
||||||
if defaultAddr == defaultTLSHost {
|
|
||||||
return defaultTLSHost, nil
|
|
||||||
}
|
|
||||||
if runtime.GOOS != "windows" {
|
|
||||||
return fmt.Sprintf("unix://%s", defaultUnixAddr), nil
|
|
||||||
}
|
|
||||||
return defaultTCPAddr, nil
|
|
||||||
}
|
|
||||||
addrParts := strings.Split(addr, "://")
|
|
||||||
if len(addrParts) == 1 {
|
|
||||||
addrParts = []string{"tcp", addrParts[0]}
|
|
||||||
}
|
|
||||||
|
|
||||||
switch addrParts[0] {
|
|
||||||
case "tcp":
|
|
||||||
return parseTCPAddr(addrParts[1], defaultTCPAddr)
|
|
||||||
case "unix":
|
|
||||||
return parseUnixAddr(addrParts[1], defaultUnixAddr)
|
|
||||||
case "fd":
|
|
||||||
return addr, nil
|
|
||||||
default:
|
|
||||||
return "", fmt.Errorf("Invalid bind address format: %s", addr)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseUnixAddr parses and validates that the specified address is a valid UNIX
|
|
||||||
// socket address. It returns a formatted UNIX socket address, either using the
|
|
||||||
// address parsed from addr, or the contents of defaultAddr if addr is a blank
|
|
||||||
// string.
|
|
||||||
func parseUnixAddr(addr string, defaultAddr string) (string, error) {
|
|
||||||
addr = strings.TrimPrefix(addr, "unix://")
|
|
||||||
if strings.Contains(addr, "://") {
|
|
||||||
return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
|
|
||||||
}
|
|
||||||
if addr == "" {
|
|
||||||
addr = defaultAddr
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("unix://%s", addr), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseTCPAddr parses and validates that the specified address is a valid TCP
|
|
||||||
// address. It returns a formatted TCP address, either using the address parsed
|
|
||||||
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
|
|
||||||
// tryAddr is expected to have already been Trim()'d
|
|
||||||
// defaultAddr must be in the full `tcp://host:port` form
|
|
||||||
func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
|
|
||||||
if tryAddr == "" || tryAddr == "tcp://" {
|
|
||||||
return defaultAddr, nil
|
|
||||||
}
|
|
||||||
addr := strings.TrimPrefix(tryAddr, "tcp://")
|
|
||||||
if strings.Contains(addr, "://") || addr == "" {
|
|
||||||
return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
|
|
||||||
}
|
|
||||||
|
|
||||||
defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
|
|
||||||
defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
|
|
||||||
// not 1.4. See https://github.com/golang/go/issues/12200 and
|
|
||||||
// https://github.com/golang/go/issues/6530.
|
|
||||||
if strings.HasSuffix(addr, "]:") {
|
|
||||||
addr += defaultPort
|
|
||||||
}
|
|
||||||
|
|
||||||
u, err := url.Parse("tcp://" + addr)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
host, port, err := net.SplitHostPort(u.Host)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
|
|
||||||
}
|
|
||||||
|
|
||||||
if host == "" {
|
|
||||||
host = defaultHost
|
|
||||||
}
|
|
||||||
if port == "" {
|
|
||||||
port = defaultPort
|
|
||||||
}
|
|
||||||
p, err := strconv.Atoi(port)
|
|
||||||
if err != nil && p == 0 {
|
|
||||||
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
|
|
||||||
}
|
|
||||||
|
|
||||||
return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
|
|
||||||
}
|
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
// +build !windows
|
|
||||||
|
|
||||||
package opts
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// DefaultHost constant defines the default host string used by docker on other hosts than Windows
|
|
||||||
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
// +build windows
|
|
||||||
|
|
||||||
package opts
|
|
||||||
|
|
||||||
// DefaultHost constant defines the default host string used by docker on Windows
|
|
||||||
var DefaultHost = DefaultTCPHost
|
|
||||||
42
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go
generated
vendored
42
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go
generated
vendored
@@ -1,42 +0,0 @@
|
|||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IPOpt holds an IP. It is used to store values from CLI flags.
|
|
||||||
type IPOpt struct {
|
|
||||||
*net.IP
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewIPOpt creates a new IPOpt from a reference net.IP and a
|
|
||||||
// string representation of an IP. If the string is not a valid
|
|
||||||
// IP it will fallback to the specified reference.
|
|
||||||
func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
|
|
||||||
o := &IPOpt{
|
|
||||||
IP: ref,
|
|
||||||
}
|
|
||||||
o.Set(defaultVal)
|
|
||||||
return o
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set sets an IPv4 or IPv6 address from a given string. If the given
|
|
||||||
// string is not parseable as an IP address it returns an error.
|
|
||||||
func (o *IPOpt) Set(val string) error {
|
|
||||||
ip := net.ParseIP(val)
|
|
||||||
if ip == nil {
|
|
||||||
return fmt.Errorf("%s is not an ip address", val)
|
|
||||||
}
|
|
||||||
*o.IP = ip
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// String returns the IP address stored in the IPOpt. If stored IP is a
|
|
||||||
// nil pointer, it returns an empty string.
|
|
||||||
func (o *IPOpt) String() string {
|
|
||||||
if *o.IP == nil {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return o.IP.String()
|
|
||||||
}
|
|
||||||
252
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go
generated
vendored
252
vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go
generated
vendored
@@ -1,252 +0,0 @@
|
|||||||
package opts
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net"
|
|
||||||
"os"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
|
|
||||||
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
|
|
||||||
)
|
|
||||||
|
|
||||||
// ListOpts holds a list of values and a validation function.
|
|
||||||
type ListOpts struct {
|
|
||||||
values *[]string
|
|
||||||
validator ValidatorFctType
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewListOpts creates a new ListOpts with the specified validator.
|
|
||||||
func NewListOpts(validator ValidatorFctType) ListOpts {
|
|
||||||
var values []string
|
|
||||||
return *NewListOptsRef(&values, validator)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewListOptsRef creates a new ListOpts with the specified values and validator.
|
|
||||||
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
|
|
||||||
return &ListOpts{
|
|
||||||
values: values,
|
|
||||||
validator: validator,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts *ListOpts) String() string {
|
|
||||||
return fmt.Sprintf("%v", []string((*opts.values)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set validates if needed the input value and add it to the
|
|
||||||
// internal slice.
|
|
||||||
func (opts *ListOpts) Set(value string) error {
|
|
||||||
if opts.validator != nil {
|
|
||||||
v, err := opts.validator(value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
value = v
|
|
||||||
}
|
|
||||||
(*opts.values) = append((*opts.values), value)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Delete removes the specified element from the slice.
|
|
||||||
func (opts *ListOpts) Delete(key string) {
|
|
||||||
for i, k := range *opts.values {
|
|
||||||
if k == key {
|
|
||||||
(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetMap returns the content of values in a map in order to avoid
|
|
||||||
// duplicates.
|
|
||||||
func (opts *ListOpts) GetMap() map[string]struct{} {
|
|
||||||
ret := make(map[string]struct{})
|
|
||||||
for _, k := range *opts.values {
|
|
||||||
ret[k] = struct{}{}
|
|
||||||
}
|
|
||||||
return ret
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAll returns the values of slice.
|
|
||||||
func (opts *ListOpts) GetAll() []string {
|
|
||||||
return (*opts.values)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAllOrEmpty returns the values of the slice
|
|
||||||
// or an empty slice when there are no values.
|
|
||||||
func (opts *ListOpts) GetAllOrEmpty() []string {
|
|
||||||
v := *opts.values
|
|
||||||
if v == nil {
|
|
||||||
return make([]string, 0)
|
|
||||||
}
|
|
||||||
return v
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get checks the existence of the specified key.
|
|
||||||
func (opts *ListOpts) Get(key string) bool {
|
|
||||||
for _, k := range *opts.values {
|
|
||||||
if k == key {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
// Len returns the amount of element in the slice.
|
|
||||||
func (opts *ListOpts) Len() int {
|
|
||||||
return len((*opts.values))
|
|
||||||
}
|
|
||||||
|
|
||||||
//MapOpts holds a map of values and a validation function.
|
|
||||||
type MapOpts struct {
|
|
||||||
values map[string]string
|
|
||||||
validator ValidatorFctType
|
|
||||||
}
|
|
||||||
|
|
||||||
// Set validates if needed the input value and add it to the
|
|
||||||
// internal map, by splitting on '='.
|
|
||||||
func (opts *MapOpts) Set(value string) error {
|
|
||||||
if opts.validator != nil {
|
|
||||||
v, err := opts.validator(value)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
value = v
|
|
||||||
}
|
|
||||||
vals := strings.SplitN(value, "=", 2)
|
|
||||||
if len(vals) == 1 {
|
|
||||||
(opts.values)[vals[0]] = ""
|
|
||||||
} else {
|
|
||||||
(opts.values)[vals[0]] = vals[1]
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetAll returns the values of MapOpts as a map.
|
|
||||||
func (opts *MapOpts) GetAll() map[string]string {
|
|
||||||
return opts.values
|
|
||||||
}
|
|
||||||
|
|
||||||
func (opts *MapOpts) String() string {
|
|
||||||
return fmt.Sprintf("%v", map[string]string((opts.values)))
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
|
|
||||||
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
|
|
||||||
if values == nil {
|
|
||||||
values = make(map[string]string)
|
|
||||||
}
|
|
||||||
return &MapOpts{
|
|
||||||
values: values,
|
|
||||||
validator: validator,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
|
|
||||||
type ValidatorFctType func(val string) (string, error)
|
|
||||||
|
|
||||||
// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
|
|
||||||
type ValidatorFctListType func(val string) ([]string, error)
|
|
||||||
|
|
||||||
// ValidateAttach validates that the specified string is a valid attach option.
|
|
||||||
func ValidateAttach(val string) (string, error) {
|
|
||||||
s := strings.ToLower(val)
|
|
||||||
for _, str := range []string{"stdin", "stdout", "stderr"} {
|
|
||||||
if s == str {
|
|
||||||
return s, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR")
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateEnv validates an environment variable and returns it.
|
|
||||||
// If no value is specified, it returns the current value using os.Getenv.
|
|
||||||
//
|
|
||||||
// As on ParseEnvFile and related to #16585, environment variable names
|
|
||||||
// are not validate what so ever, it's up to application inside docker
|
|
||||||
// to validate them or not.
|
|
||||||
func ValidateEnv(val string) (string, error) {
|
|
||||||
arr := strings.Split(val, "=")
|
|
||||||
if len(arr) > 1 {
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
if !doesEnvExist(val) {
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateIPAddress validates an Ip address.
|
|
||||||
func ValidateIPAddress(val string) (string, error) {
|
|
||||||
var ip = net.ParseIP(strings.TrimSpace(val))
|
|
||||||
if ip != nil {
|
|
||||||
return ip.String(), nil
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("%s is not an ip address", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateMACAddress validates a MAC address.
|
|
||||||
func ValidateMACAddress(val string) (string, error) {
|
|
||||||
_, err := net.ParseMAC(strings.TrimSpace(val))
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateDNSSearch validates domain for resolvconf search configuration.
|
|
||||||
// A zero length domain is represented by a dot (.).
|
|
||||||
func ValidateDNSSearch(val string) (string, error) {
|
|
||||||
if val = strings.Trim(val, " "); val == "." {
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
return validateDomain(val)
|
|
||||||
}
|
|
||||||
|
|
||||||
func validateDomain(val string) (string, error) {
|
|
||||||
if alphaRegexp.FindString(val) == "" {
|
|
||||||
return "", fmt.Errorf("%s is not a valid domain", val)
|
|
||||||
}
|
|
||||||
ns := domainRegexp.FindSubmatch([]byte(val))
|
|
||||||
if len(ns) > 0 && len(ns[1]) < 255 {
|
|
||||||
return string(ns[1]), nil
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("%s is not a valid domain", val)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
|
|
||||||
// ExtraHost are in the form of name:ip where the ip has to be a valid ip (ipv4 or ipv6).
|
|
||||||
func ValidateExtraHost(val string) (string, error) {
|
|
||||||
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
|
|
||||||
arr := strings.SplitN(val, ":", 2)
|
|
||||||
if len(arr) != 2 || len(arr[0]) == 0 {
|
|
||||||
return "", fmt.Errorf("bad format for add-host: %q", val)
|
|
||||||
}
|
|
||||||
if _, err := ValidateIPAddress(arr[1]); err != nil {
|
|
||||||
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ValidateLabel validates that the specified string is a valid label, and returns it.
|
|
||||||
// Labels are in the form on key=value.
|
|
||||||
func ValidateLabel(val string) (string, error) {
|
|
||||||
if strings.Count(val, "=") < 1 {
|
|
||||||
return "", fmt.Errorf("bad attribute format: %s", val)
|
|
||||||
}
|
|
||||||
return val, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func doesEnvExist(name string) bool {
|
|
||||||
for _, entry := range os.Environ() {
|
|
||||||
parts := strings.SplitN(entry, "=", 2)
|
|
||||||
if parts[0] == name {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
// +build !windows
|
|
||||||
|
|
||||||
package opts
|
|
||||||
|
|
||||||
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
|
|
||||||
const DefaultHTTPHost = "localhost"
|
|
||||||
@@ -1,56 +0,0 @@
|
|||||||
package opts
|
|
||||||
|
|
||||||
// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4.
|
|
||||||
// @jhowardmsft, @swernli.
|
|
||||||
//
|
|
||||||
// On Windows, this mitigates a problem with the default options of running
|
|
||||||
// a docker client against a local docker daemon on TP4.
|
|
||||||
//
|
|
||||||
// What was found that if the default host is "localhost", even if the client
|
|
||||||
// (and daemon as this is local) is not physically on a network, and the DNS
|
|
||||||
// cache is flushed (ipconfig /flushdns), then the client will pause for
|
|
||||||
// exactly one second when connecting to the daemon for calls. For example
|
|
||||||
// using docker run windowsservercore cmd, the CLI will send a create followed
|
|
||||||
// by an attach. You see the delay between the attach finishing and the attach
|
|
||||||
// being seen by the daemon.
|
|
||||||
//
|
|
||||||
// Here's some daemon debug logs with additional debug spew put in. The
|
|
||||||
// AfterWriteJSON log is the very last thing the daemon does as part of the
|
|
||||||
// create call. The POST /attach is the second CLI call. Notice the second
|
|
||||||
// time gap.
|
|
||||||
//
|
|
||||||
// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
|
|
||||||
// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
|
|
||||||
// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
|
|
||||||
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
|
|
||||||
// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
|
|
||||||
// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
|
|
||||||
// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
|
|
||||||
// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
|
|
||||||
// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
|
|
||||||
// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
|
|
||||||
// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
|
|
||||||
// ... 1 second gap here....
|
|
||||||
// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
|
|
||||||
// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
|
|
||||||
//
|
|
||||||
// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
|
|
||||||
// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory,
|
|
||||||
// the Windows networking stack is supposed to resolve "localhost" internally,
|
|
||||||
// without hitting DNS, or even reading the hosts file (which is why localhost
|
|
||||||
// is commented out in the hosts file on Windows).
|
|
||||||
//
|
|
||||||
// We have validated that working around this using the actual IPv4 localhost
|
|
||||||
// address does not cause the delay.
|
|
||||||
//
|
|
||||||
// This does not occur with the docker client built with 1.4.3 on the same
|
|
||||||
// Windows TP4 build, regardless of whether the daemon is built using 1.5.1
|
|
||||||
// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
|
|
||||||
// on a cross-compiled Windows binary (from Linux).
|
|
||||||
//
|
|
||||||
// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
|
|
||||||
// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...'
|
|
||||||
// explicitly.
|
|
||||||
|
|
||||||
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080
|
|
||||||
const DefaultHTTPHost = "127.0.0.1"
|
|
||||||
@@ -1 +0,0 @@
|
|||||||
This code provides helper functions for dealing with archive files.
|
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,112 +0,0 @@
|
|||||||
// +build !windows
|
|
||||||
|
|
||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"errors"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
// fixVolumePathPrefix does platform specific processing to ensure that if
|
|
||||||
// the path being passed in is not in a volume path format, convert it to one.
|
|
||||||
func fixVolumePathPrefix(srcPath string) string {
|
|
||||||
return srcPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// getWalkRoot calculates the root path when performing a TarWithOptions.
|
|
||||||
// We use a separate function as this is platform specific. On Linux, we
|
|
||||||
// can't use filepath.Join(srcPath,include) because this will clean away
|
|
||||||
// a trailing "." or "/" which may be important.
|
|
||||||
func getWalkRoot(srcPath string, include string) string {
|
|
||||||
return srcPath + string(filepath.Separator) + include
|
|
||||||
}
|
|
||||||
|
|
||||||
// CanonicalTarNameForPath returns platform-specific filepath
|
|
||||||
// to canonical posix-style path for tar archival. p is relative
|
|
||||||
// path.
|
|
||||||
func CanonicalTarNameForPath(p string) (string, error) {
|
|
||||||
return p, nil // already unix-style
|
|
||||||
}
|
|
||||||
|
|
||||||
// chmodTarEntry is used to adjust the file permissions used in tar header based
|
|
||||||
// on the platform the archival is done.
|
|
||||||
|
|
||||||
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
|
||||||
return perm // noop for unix as golang APIs provide perm bits correctly
|
|
||||||
}
|
|
||||||
|
|
||||||
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
|
|
||||||
s, ok := stat.(*syscall.Stat_t)
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
err = errors.New("cannot convert stat value to syscall.Stat_t")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
inode = uint64(s.Ino)
|
|
||||||
|
|
||||||
// Currently go does not fill in the major/minors
|
|
||||||
if s.Mode&syscall.S_IFBLK != 0 ||
|
|
||||||
s.Mode&syscall.S_IFCHR != 0 {
|
|
||||||
hdr.Devmajor = int64(major(uint64(s.Rdev)))
|
|
||||||
hdr.Devminor = int64(minor(uint64(s.Rdev)))
|
|
||||||
}
|
|
||||||
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func getFileUIDGID(stat interface{}) (int, int, error) {
|
|
||||||
s, ok := stat.(*syscall.Stat_t)
|
|
||||||
|
|
||||||
if !ok {
|
|
||||||
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
|
|
||||||
}
|
|
||||||
return int(s.Uid), int(s.Gid), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func major(device uint64) uint64 {
|
|
||||||
return (device >> 8) & 0xfff
|
|
||||||
}
|
|
||||||
|
|
||||||
func minor(device uint64) uint64 {
|
|
||||||
return (device & 0xff) | ((device >> 12) & 0xfff00)
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
|
||||||
// createTarFile to handle the following types of header: Block; Char; Fifo
|
|
||||||
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
|
||||||
mode := uint32(hdr.Mode & 07777)
|
|
||||||
switch hdr.Typeflag {
|
|
||||||
case tar.TypeBlock:
|
|
||||||
mode |= syscall.S_IFBLK
|
|
||||||
case tar.TypeChar:
|
|
||||||
mode |= syscall.S_IFCHR
|
|
||||||
case tar.TypeFifo:
|
|
||||||
mode |= syscall.S_IFIFO
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
|
||||||
if hdr.Typeflag == tar.TypeLink {
|
|
||||||
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
|
|
||||||
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else if hdr.Typeflag != tar.TypeSymlink {
|
|
||||||
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,70 +0,0 @@
|
|||||||
// +build windows
|
|
||||||
|
|
||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// fixVolumePathPrefix does platform specific processing to ensure that if
|
|
||||||
// the path being passed in is not in a volume path format, convert it to one.
|
|
||||||
func fixVolumePathPrefix(srcPath string) string {
|
|
||||||
return longpath.AddPrefix(srcPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// getWalkRoot calculates the root path when performing a TarWithOptions.
|
|
||||||
// We use a separate function as this is platform specific.
|
|
||||||
func getWalkRoot(srcPath string, include string) string {
|
|
||||||
return filepath.Join(srcPath, include)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CanonicalTarNameForPath returns platform-specific filepath
|
|
||||||
// to canonical posix-style path for tar archival. p is relative
|
|
||||||
// path.
|
|
||||||
func CanonicalTarNameForPath(p string) (string, error) {
|
|
||||||
// windows: convert windows style relative path with backslashes
|
|
||||||
// into forward slashes. Since windows does not allow '/' or '\'
|
|
||||||
// in file names, it is mostly safe to replace however we must
|
|
||||||
// check just in case
|
|
||||||
if strings.Contains(p, "/") {
|
|
||||||
return "", fmt.Errorf("Windows path contains forward slash: %s", p)
|
|
||||||
}
|
|
||||||
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// chmodTarEntry is used to adjust the file permissions used in tar header based
|
|
||||||
// on the platform the archival is done.
|
|
||||||
func chmodTarEntry(perm os.FileMode) os.FileMode {
|
|
||||||
perm &= 0755
|
|
||||||
// Add the x bit: make everything +x from windows
|
|
||||||
perm |= 0111
|
|
||||||
|
|
||||||
return perm
|
|
||||||
}
|
|
||||||
|
|
||||||
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
|
|
||||||
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
|
|
||||||
// createTarFile to handle the following types of header: Block; Char; Fifo
|
|
||||||
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func getFileUIDGID(stat interface{}) (int, int, error) {
|
|
||||||
// no notion of file ownership mapping yet on Windows
|
|
||||||
return 0, 0, nil
|
|
||||||
}
|
|
||||||
@@ -1,416 +0,0 @@
|
|||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
// ChangeType represents the change type.
|
|
||||||
type ChangeType int
|
|
||||||
|
|
||||||
const (
|
|
||||||
// ChangeModify represents the modify operation.
|
|
||||||
ChangeModify = iota
|
|
||||||
// ChangeAdd represents the add operation.
|
|
||||||
ChangeAdd
|
|
||||||
// ChangeDelete represents the delete operation.
|
|
||||||
ChangeDelete
|
|
||||||
)
|
|
||||||
|
|
||||||
func (c ChangeType) String() string {
|
|
||||||
switch c {
|
|
||||||
case ChangeModify:
|
|
||||||
return "C"
|
|
||||||
case ChangeAdd:
|
|
||||||
return "A"
|
|
||||||
case ChangeDelete:
|
|
||||||
return "D"
|
|
||||||
}
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// Change represents a change, it wraps the change type and path.
|
|
||||||
// It describes changes of the files in the path respect to the
|
|
||||||
// parent layers. The change could be modify, add, delete.
|
|
||||||
// This is used for layer diff.
|
|
||||||
type Change struct {
|
|
||||||
Path string
|
|
||||||
Kind ChangeType
|
|
||||||
}
|
|
||||||
|
|
||||||
func (change *Change) String() string {
|
|
||||||
return fmt.Sprintf("%s %s", change.Kind, change.Path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// for sort.Sort
|
|
||||||
type changesByPath []Change
|
|
||||||
|
|
||||||
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
|
|
||||||
func (c changesByPath) Len() int { return len(c) }
|
|
||||||
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
|
|
||||||
|
|
||||||
// Gnu tar and the go tar writer don't have sub-second mtime
|
|
||||||
// precision, which is problematic when we apply changes via tar
|
|
||||||
// files, we handle this by comparing for exact times, *or* same
|
|
||||||
// second count and either a or b having exactly 0 nanoseconds
|
|
||||||
func sameFsTime(a, b time.Time) bool {
|
|
||||||
return a == b ||
|
|
||||||
(a.Unix() == b.Unix() &&
|
|
||||||
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
|
|
||||||
}
|
|
||||||
|
|
||||||
func sameFsTimeSpec(a, b syscall.Timespec) bool {
|
|
||||||
return a.Sec == b.Sec &&
|
|
||||||
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Changes walks the path rw and determines changes for the files in the path,
|
|
||||||
// with respect to the parent layers
|
|
||||||
func Changes(layers []string, rw string) ([]Change, error) {
|
|
||||||
var (
|
|
||||||
changes []Change
|
|
||||||
changedDirs = make(map[string]struct{})
|
|
||||||
)
|
|
||||||
|
|
||||||
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rebase path
|
|
||||||
path, err = filepath.Rel(rw, path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// As this runs on the daemon side, file paths are OS specific.
|
|
||||||
path = filepath.Join(string(os.PathSeparator), path)
|
|
||||||
|
|
||||||
// Skip root
|
|
||||||
if path == string(os.PathSeparator) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip AUFS metadata
|
|
||||||
if matched, err := filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path); err != nil || matched {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
change := Change{
|
|
||||||
Path: path,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find out what kind of modification happened
|
|
||||||
file := filepath.Base(path)
|
|
||||||
// If there is a whiteout, then the file was removed
|
|
||||||
if strings.HasPrefix(file, WhiteoutPrefix) {
|
|
||||||
originalFile := file[len(WhiteoutPrefix):]
|
|
||||||
change.Path = filepath.Join(filepath.Dir(path), originalFile)
|
|
||||||
change.Kind = ChangeDelete
|
|
||||||
} else {
|
|
||||||
// Otherwise, the file was added
|
|
||||||
change.Kind = ChangeAdd
|
|
||||||
|
|
||||||
// ...Unless it already existed in a top layer, in which case, it's a modification
|
|
||||||
for _, layer := range layers {
|
|
||||||
stat, err := os.Stat(filepath.Join(layer, path))
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err == nil {
|
|
||||||
// The file existed in the top layer, so that's a modification
|
|
||||||
|
|
||||||
// However, if it's a directory, maybe it wasn't actually modified.
|
|
||||||
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
|
|
||||||
if stat.IsDir() && f.IsDir() {
|
|
||||||
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
|
|
||||||
// Both directories are the same, don't record the change
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
change.Kind = ChangeModify
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
|
|
||||||
// This block is here to ensure the change is recorded even if the
|
|
||||||
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
|
|
||||||
// Check https://github.com/docker/docker/pull/13590 for details.
|
|
||||||
if f.IsDir() {
|
|
||||||
changedDirs[path] = struct{}{}
|
|
||||||
}
|
|
||||||
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
|
|
||||||
parent := filepath.Dir(path)
|
|
||||||
if _, ok := changedDirs[parent]; !ok && parent != "/" {
|
|
||||||
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
|
|
||||||
changedDirs[parent] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Record change
|
|
||||||
changes = append(changes, change)
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return changes, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FileInfo describes the information of a file.
|
|
||||||
type FileInfo struct {
|
|
||||||
parent *FileInfo
|
|
||||||
name string
|
|
||||||
stat *system.StatT
|
|
||||||
children map[string]*FileInfo
|
|
||||||
capability []byte
|
|
||||||
added bool
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookUp looks up the file information of a file.
|
|
||||||
func (info *FileInfo) LookUp(path string) *FileInfo {
|
|
||||||
// As this runs on the daemon side, file paths are OS specific.
|
|
||||||
parent := info
|
|
||||||
if path == string(os.PathSeparator) {
|
|
||||||
return info
|
|
||||||
}
|
|
||||||
|
|
||||||
pathElements := strings.Split(path, string(os.PathSeparator))
|
|
||||||
for _, elem := range pathElements {
|
|
||||||
if elem != "" {
|
|
||||||
child := parent.children[elem]
|
|
||||||
if child == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
parent = child
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return parent
|
|
||||||
}
|
|
||||||
|
|
||||||
func (info *FileInfo) path() string {
|
|
||||||
if info.parent == nil {
|
|
||||||
// As this runs on the daemon side, file paths are OS specific.
|
|
||||||
return string(os.PathSeparator)
|
|
||||||
}
|
|
||||||
return filepath.Join(info.parent.path(), info.name)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
|
||||||
|
|
||||||
sizeAtEntry := len(*changes)
|
|
||||||
|
|
||||||
if oldInfo == nil {
|
|
||||||
// add
|
|
||||||
change := Change{
|
|
||||||
Path: info.path(),
|
|
||||||
Kind: ChangeAdd,
|
|
||||||
}
|
|
||||||
*changes = append(*changes, change)
|
|
||||||
info.added = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// We make a copy so we can modify it to detect additions
|
|
||||||
// also, we only recurse on the old dir if the new info is a directory
|
|
||||||
// otherwise any previous delete/change is considered recursive
|
|
||||||
oldChildren := make(map[string]*FileInfo)
|
|
||||||
if oldInfo != nil && info.isDir() {
|
|
||||||
for k, v := range oldInfo.children {
|
|
||||||
oldChildren[k] = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for name, newChild := range info.children {
|
|
||||||
oldChild, _ := oldChildren[name]
|
|
||||||
if oldChild != nil {
|
|
||||||
// change?
|
|
||||||
oldStat := oldChild.stat
|
|
||||||
newStat := newChild.stat
|
|
||||||
// Note: We can't compare inode or ctime or blocksize here, because these change
|
|
||||||
// when copying a file into a container. However, that is not generally a problem
|
|
||||||
// because any content change will change mtime, and any status change should
|
|
||||||
// be visible when actually comparing the stat fields. The only time this
|
|
||||||
// breaks down is if some code intentionally hides a change by setting
|
|
||||||
// back mtime
|
|
||||||
if statDifferent(oldStat, newStat) ||
|
|
||||||
bytes.Compare(oldChild.capability, newChild.capability) != 0 {
|
|
||||||
change := Change{
|
|
||||||
Path: newChild.path(),
|
|
||||||
Kind: ChangeModify,
|
|
||||||
}
|
|
||||||
*changes = append(*changes, change)
|
|
||||||
newChild.added = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove from copy so we can detect deletions
|
|
||||||
delete(oldChildren, name)
|
|
||||||
}
|
|
||||||
|
|
||||||
newChild.addChanges(oldChild, changes)
|
|
||||||
}
|
|
||||||
for _, oldChild := range oldChildren {
|
|
||||||
// delete
|
|
||||||
change := Change{
|
|
||||||
Path: oldChild.path(),
|
|
||||||
Kind: ChangeDelete,
|
|
||||||
}
|
|
||||||
*changes = append(*changes, change)
|
|
||||||
}
|
|
||||||
|
|
||||||
// If there were changes inside this directory, we need to add it, even if the directory
|
|
||||||
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
|
|
||||||
// As this runs on the daemon side, file paths are OS specific.
|
|
||||||
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
|
|
||||||
change := Change{
|
|
||||||
Path: info.path(),
|
|
||||||
Kind: ChangeModify,
|
|
||||||
}
|
|
||||||
// Let's insert the directory entry before the recently added entries located inside this dir
|
|
||||||
*changes = append(*changes, change) // just to resize the slice, will be overwritten
|
|
||||||
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
|
|
||||||
(*changes)[sizeAtEntry] = change
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// Changes add changes to file information.
|
|
||||||
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
|
|
||||||
var changes []Change
|
|
||||||
|
|
||||||
info.addChanges(oldInfo, &changes)
|
|
||||||
|
|
||||||
return changes
|
|
||||||
}
|
|
||||||
|
|
||||||
func newRootFileInfo() *FileInfo {
|
|
||||||
// As this runs on the daemon side, file paths are OS specific.
|
|
||||||
root := &FileInfo{
|
|
||||||
name: string(os.PathSeparator),
|
|
||||||
children: make(map[string]*FileInfo),
|
|
||||||
}
|
|
||||||
return root
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
|
|
||||||
// If oldDir is "", then all files in newDir will be Add-Changes.
|
|
||||||
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
|
|
||||||
var (
|
|
||||||
oldRoot, newRoot *FileInfo
|
|
||||||
)
|
|
||||||
if oldDir == "" {
|
|
||||||
emptyDir, err := ioutil.TempDir("", "empty")
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer os.Remove(emptyDir)
|
|
||||||
oldDir = emptyDir
|
|
||||||
}
|
|
||||||
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return newRoot.Changes(oldRoot), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
|
|
||||||
func ChangesSize(newDir string, changes []Change) int64 {
|
|
||||||
var (
|
|
||||||
size int64
|
|
||||||
sf = make(map[uint64]struct{})
|
|
||||||
)
|
|
||||||
for _, change := range changes {
|
|
||||||
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
|
|
||||||
file := filepath.Join(newDir, change.Path)
|
|
||||||
fileInfo, err := os.Lstat(file)
|
|
||||||
if err != nil {
|
|
||||||
logrus.Errorf("Can not stat %q: %s", file, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
if fileInfo != nil && !fileInfo.IsDir() {
|
|
||||||
if hasHardlinks(fileInfo) {
|
|
||||||
inode := getIno(fileInfo)
|
|
||||||
if _, ok := sf[inode]; !ok {
|
|
||||||
size += fileInfo.Size()
|
|
||||||
sf[inode] = struct{}{}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
size += fileInfo.Size()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return size
|
|
||||||
}
|
|
||||||
|
|
||||||
// ExportChanges produces an Archive from the provided changes, relative to dir.
|
|
||||||
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) {
|
|
||||||
reader, writer := io.Pipe()
|
|
||||||
go func() {
|
|
||||||
ta := &tarAppender{
|
|
||||||
TarWriter: tar.NewWriter(writer),
|
|
||||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
|
||||||
SeenFiles: make(map[uint64]string),
|
|
||||||
UIDMaps: uidMaps,
|
|
||||||
GIDMaps: gidMaps,
|
|
||||||
}
|
|
||||||
// this buffer is needed for the duration of this piped stream
|
|
||||||
defer pools.BufioWriter32KPool.Put(ta.Buffer)
|
|
||||||
|
|
||||||
sort.Sort(changesByPath(changes))
|
|
||||||
|
|
||||||
// In general we log errors here but ignore them because
|
|
||||||
// during e.g. a diff operation the container can continue
|
|
||||||
// mutating the filesystem and we can see transient errors
|
|
||||||
// from this
|
|
||||||
for _, change := range changes {
|
|
||||||
if change.Kind == ChangeDelete {
|
|
||||||
whiteOutDir := filepath.Dir(change.Path)
|
|
||||||
whiteOutBase := filepath.Base(change.Path)
|
|
||||||
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
|
|
||||||
timestamp := time.Now()
|
|
||||||
hdr := &tar.Header{
|
|
||||||
Name: whiteOut[1:],
|
|
||||||
Size: 0,
|
|
||||||
ModTime: timestamp,
|
|
||||||
AccessTime: timestamp,
|
|
||||||
ChangeTime: timestamp,
|
|
||||||
}
|
|
||||||
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
|
|
||||||
logrus.Debugf("Can't write whiteout header: %s", err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
path := filepath.Join(dir, change.Path)
|
|
||||||
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
|
|
||||||
logrus.Debugf("Can't add file %s to tar: %s", path, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Make sure to check the error on Close.
|
|
||||||
if err := ta.TarWriter.Close(); err != nil {
|
|
||||||
logrus.Debugf("Can't close layer: %s", err)
|
|
||||||
}
|
|
||||||
if err := writer.Close(); err != nil {
|
|
||||||
logrus.Debugf("failed close Changes writer: %s", err)
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
return reader, nil
|
|
||||||
}
|
|
||||||
@@ -1,285 +0,0 @@
|
|||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sort"
|
|
||||||
"syscall"
|
|
||||||
"unsafe"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
// walker is used to implement collectFileInfoForChanges on linux. Where this
|
|
||||||
// method in general returns the entire contents of two directory trees, we
|
|
||||||
// optimize some FS calls out on linux. In particular, we take advantage of the
|
|
||||||
// fact that getdents(2) returns the inode of each file in the directory being
|
|
||||||
// walked, which, when walking two trees in parallel to generate a list of
|
|
||||||
// changes, can be used to prune subtrees without ever having to lstat(2) them
|
|
||||||
// directly. Eliminating stat calls in this way can save up to seconds on large
|
|
||||||
// images.
|
|
||||||
type walker struct {
|
|
||||||
dir1 string
|
|
||||||
dir2 string
|
|
||||||
root1 *FileInfo
|
|
||||||
root2 *FileInfo
|
|
||||||
}
|
|
||||||
|
|
||||||
// collectFileInfoForChanges returns a complete representation of the trees
|
|
||||||
// rooted at dir1 and dir2, with one important exception: any subtree or
|
|
||||||
// leaf where the inode and device numbers are an exact match between dir1
|
|
||||||
// and dir2 will be pruned from the results. This method is *only* to be used
|
|
||||||
// to generating a list of changes between the two directories, as it does not
|
|
||||||
// reflect the full contents.
|
|
||||||
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
|
|
||||||
w := &walker{
|
|
||||||
dir1: dir1,
|
|
||||||
dir2: dir2,
|
|
||||||
root1: newRootFileInfo(),
|
|
||||||
root2: newRootFileInfo(),
|
|
||||||
}
|
|
||||||
|
|
||||||
i1, err := os.Lstat(w.dir1)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
i2, err := os.Lstat(w.dir2)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := w.walk("/", i1, i2); err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return w.root1, w.root2, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Given a FileInfo, its path info, and a reference to the root of the tree
|
|
||||||
// being constructed, register this file with the tree.
|
|
||||||
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
|
|
||||||
if fi == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
parent := root.LookUp(filepath.Dir(path))
|
|
||||||
if parent == nil {
|
|
||||||
return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path)
|
|
||||||
}
|
|
||||||
info := &FileInfo{
|
|
||||||
name: filepath.Base(path),
|
|
||||||
children: make(map[string]*FileInfo),
|
|
||||||
parent: parent,
|
|
||||||
}
|
|
||||||
cpath := filepath.Join(dir, path)
|
|
||||||
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
info.stat = stat
|
|
||||||
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
|
|
||||||
parent.children[info.name] = info
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Walk a subtree rooted at the same path in both trees being iterated. For
|
|
||||||
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
|
|
||||||
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
|
|
||||||
// Register these nodes with the return trees, unless we're still at the
|
|
||||||
// (already-created) roots:
|
|
||||||
if path != "/" {
|
|
||||||
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
is1Dir := i1 != nil && i1.IsDir()
|
|
||||||
is2Dir := i2 != nil && i2.IsDir()
|
|
||||||
|
|
||||||
sameDevice := false
|
|
||||||
if i1 != nil && i2 != nil {
|
|
||||||
si1 := i1.Sys().(*syscall.Stat_t)
|
|
||||||
si2 := i2.Sys().(*syscall.Stat_t)
|
|
||||||
if si1.Dev == si2.Dev {
|
|
||||||
sameDevice = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// If these files are both non-existent, or leaves (non-dirs), we are done.
|
|
||||||
if !is1Dir && !is2Dir {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Fetch the names of all the files contained in both directories being walked:
|
|
||||||
var names1, names2 []nameIno
|
|
||||||
if is1Dir {
|
|
||||||
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if is2Dir {
|
|
||||||
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// We have lists of the files contained in both parallel directories, sorted
|
|
||||||
// in the same order. Walk them in parallel, generating a unique merged list
|
|
||||||
// of all items present in either or both directories.
|
|
||||||
var names []string
|
|
||||||
ix1 := 0
|
|
||||||
ix2 := 0
|
|
||||||
|
|
||||||
for {
|
|
||||||
if ix1 >= len(names1) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if ix2 >= len(names2) {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
ni1 := names1[ix1]
|
|
||||||
ni2 := names2[ix2]
|
|
||||||
|
|
||||||
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
|
|
||||||
case -1: // ni1 < ni2 -- advance ni1
|
|
||||||
// we will not encounter ni1 in names2
|
|
||||||
names = append(names, ni1.name)
|
|
||||||
ix1++
|
|
||||||
case 0: // ni1 == ni2
|
|
||||||
if ni1.ino != ni2.ino || !sameDevice {
|
|
||||||
names = append(names, ni1.name)
|
|
||||||
}
|
|
||||||
ix1++
|
|
||||||
ix2++
|
|
||||||
case 1: // ni1 > ni2 -- advance ni2
|
|
||||||
// we will not encounter ni2 in names1
|
|
||||||
names = append(names, ni2.name)
|
|
||||||
ix2++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for ix1 < len(names1) {
|
|
||||||
names = append(names, names1[ix1].name)
|
|
||||||
ix1++
|
|
||||||
}
|
|
||||||
for ix2 < len(names2) {
|
|
||||||
names = append(names, names2[ix2].name)
|
|
||||||
ix2++
|
|
||||||
}
|
|
||||||
|
|
||||||
// For each of the names present in either or both of the directories being
|
|
||||||
// iterated, stat the name under each root, and recurse the pair of them:
|
|
||||||
for _, name := range names {
|
|
||||||
fname := filepath.Join(path, name)
|
|
||||||
var cInfo1, cInfo2 os.FileInfo
|
|
||||||
if is1Dir {
|
|
||||||
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if is2Dir {
|
|
||||||
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
|
|
||||||
if err != nil && !os.IsNotExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// {name,inode} pairs used to support the early-pruning logic of the walker type
|
|
||||||
type nameIno struct {
|
|
||||||
name string
|
|
||||||
ino uint64
|
|
||||||
}
|
|
||||||
|
|
||||||
type nameInoSlice []nameIno
|
|
||||||
|
|
||||||
func (s nameInoSlice) Len() int { return len(s) }
|
|
||||||
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
|
|
||||||
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
|
|
||||||
|
|
||||||
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
|
|
||||||
// numbers further up the stack when reading directory contents. Unlike
|
|
||||||
// os.Readdirnames, which returns a list of filenames, this function returns a
|
|
||||||
// list of {filename,inode} pairs.
|
|
||||||
func readdirnames(dirname string) (names []nameIno, err error) {
|
|
||||||
var (
|
|
||||||
size = 100
|
|
||||||
buf = make([]byte, 4096)
|
|
||||||
nbuf int
|
|
||||||
bufp int
|
|
||||||
nb int
|
|
||||||
)
|
|
||||||
|
|
||||||
f, err := os.Open(dirname)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
names = make([]nameIno, 0, size) // Empty with room to grow.
|
|
||||||
for {
|
|
||||||
// Refill the buffer if necessary
|
|
||||||
if bufp >= nbuf {
|
|
||||||
bufp = 0
|
|
||||||
nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
|
|
||||||
if nbuf < 0 {
|
|
||||||
nbuf = 0
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return nil, os.NewSyscallError("readdirent", err)
|
|
||||||
}
|
|
||||||
if nbuf <= 0 {
|
|
||||||
break // EOF
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Drain the buffer
|
|
||||||
nb, names = parseDirent(buf[bufp:nbuf], names)
|
|
||||||
bufp += nb
|
|
||||||
}
|
|
||||||
|
|
||||||
sl := nameInoSlice(names)
|
|
||||||
sort.Sort(sl)
|
|
||||||
return sl, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// parseDirent is a minor modification of syscall.ParseDirent (linux version)
|
|
||||||
// which returns {name,inode} pairs instead of just names.
|
|
||||||
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
|
|
||||||
origlen := len(buf)
|
|
||||||
for len(buf) > 0 {
|
|
||||||
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
|
|
||||||
buf = buf[dirent.Reclen:]
|
|
||||||
if dirent.Ino == 0 { // File absent in directory.
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
|
|
||||||
var name = string(bytes[0:clen(bytes[:])])
|
|
||||||
if name == "." || name == ".." { // Useless names
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
names = append(names, nameIno{name, dirent.Ino})
|
|
||||||
}
|
|
||||||
return origlen - len(buf), names
|
|
||||||
}
|
|
||||||
|
|
||||||
func clen(n []byte) int {
|
|
||||||
for i := 0; i < len(n); i++ {
|
|
||||||
if n[i] == 0 {
|
|
||||||
return i
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return len(n)
|
|
||||||
}
|
|
||||||
@@ -1,97 +0,0 @@
|
|||||||
// +build !linux
|
|
||||||
|
|
||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
|
|
||||||
var (
|
|
||||||
oldRoot, newRoot *FileInfo
|
|
||||||
err1, err2 error
|
|
||||||
errs = make(chan error, 2)
|
|
||||||
)
|
|
||||||
go func() {
|
|
||||||
oldRoot, err1 = collectFileInfo(oldDir)
|
|
||||||
errs <- err1
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
newRoot, err2 = collectFileInfo(newDir)
|
|
||||||
errs <- err2
|
|
||||||
}()
|
|
||||||
|
|
||||||
// block until both routines have returned
|
|
||||||
for i := 0; i < 2; i++ {
|
|
||||||
if err := <-errs; err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return oldRoot, newRoot, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func collectFileInfo(sourceDir string) (*FileInfo, error) {
|
|
||||||
root := newRootFileInfo()
|
|
||||||
|
|
||||||
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rebase path
|
|
||||||
relPath, err := filepath.Rel(sourceDir, path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// As this runs on the daemon side, file paths are OS specific.
|
|
||||||
relPath = filepath.Join(string(os.PathSeparator), relPath)
|
|
||||||
|
|
||||||
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
|
|
||||||
// Temporary workaround. If the returned path starts with two backslashes,
|
|
||||||
// trim it down to a single backslash. Only relevant on Windows.
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
if strings.HasPrefix(relPath, `\\`) {
|
|
||||||
relPath = relPath[1:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if relPath == string(os.PathSeparator) {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
parent := root.LookUp(filepath.Dir(relPath))
|
|
||||||
if parent == nil {
|
|
||||||
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
info := &FileInfo{
|
|
||||||
name: filepath.Base(relPath),
|
|
||||||
children: make(map[string]*FileInfo),
|
|
||||||
parent: parent,
|
|
||||||
}
|
|
||||||
|
|
||||||
s, err := system.Lstat(path)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
info.stat = s
|
|
||||||
|
|
||||||
info.capability, _ = system.Lgetxattr(path, "security.capability")
|
|
||||||
|
|
||||||
parent.children[info.name] = info
|
|
||||||
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return root, nil
|
|
||||||
}
|
|
||||||
@@ -1,36 +0,0 @@
|
|||||||
// +build !windows
|
|
||||||
|
|
||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"syscall"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
|
||||||
// Don't look at size for dirs, its not a good measure of change
|
|
||||||
if oldStat.Mode() != newStat.Mode() ||
|
|
||||||
oldStat.UID() != newStat.UID() ||
|
|
||||||
oldStat.GID() != newStat.GID() ||
|
|
||||||
oldStat.Rdev() != newStat.Rdev() ||
|
|
||||||
// Don't look at size for dirs, its not a good measure of change
|
|
||||||
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
|
|
||||||
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (info *FileInfo) isDir() bool {
|
|
||||||
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
|
|
||||||
}
|
|
||||||
|
|
||||||
func getIno(fi os.FileInfo) uint64 {
|
|
||||||
return uint64(fi.Sys().(*syscall.Stat_t).Ino)
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasHardlinks(fi os.FileInfo) bool {
|
|
||||||
return fi.Sys().(*syscall.Stat_t).Nlink > 1
|
|
||||||
}
|
|
||||||
@@ -1,30 +0,0 @@
|
|||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
|
|
||||||
|
|
||||||
// Don't look at size for dirs, its not a good measure of change
|
|
||||||
if oldStat.ModTime() != newStat.ModTime() ||
|
|
||||||
oldStat.Mode() != newStat.Mode() ||
|
|
||||||
oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
func (info *FileInfo) isDir() bool {
|
|
||||||
return info.parent == nil || info.stat.IsDir()
|
|
||||||
}
|
|
||||||
|
|
||||||
func getIno(fi os.FileInfo) (inode uint64) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
func hasHardlinks(fi os.FileInfo) bool {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
@@ -1,458 +0,0 @@
|
|||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Errors used or returned by this file.
|
|
||||||
var (
|
|
||||||
ErrNotDirectory = errors.New("not a directory")
|
|
||||||
ErrDirNotExists = errors.New("no such directory")
|
|
||||||
ErrCannotCopyDir = errors.New("cannot copy directory")
|
|
||||||
ErrInvalidCopySource = errors.New("invalid copy source content")
|
|
||||||
)
|
|
||||||
|
|
||||||
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
|
|
||||||
// processing using any utility functions from the path or filepath stdlib
|
|
||||||
// packages) and appends a trailing `/.` or `/` if its corresponding original
|
|
||||||
// path (from before being processed by utility functions from the path or
|
|
||||||
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
|
|
||||||
// path already ends in a `.` path segment, then another is not added. If the
|
|
||||||
// clean path already ends in a path separator, then another is not added.
|
|
||||||
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
|
|
||||||
// Ensure paths are in platform semantics
|
|
||||||
cleanedPath = normalizePath(cleanedPath)
|
|
||||||
originalPath = normalizePath(originalPath)
|
|
||||||
|
|
||||||
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
|
|
||||||
if !hasTrailingPathSeparator(cleanedPath) {
|
|
||||||
// Add a separator if it doesn't already end with one (a cleaned
|
|
||||||
// path would only end in a separator if it is the root).
|
|
||||||
cleanedPath += string(filepath.Separator)
|
|
||||||
}
|
|
||||||
cleanedPath += "."
|
|
||||||
}
|
|
||||||
|
|
||||||
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
|
|
||||||
cleanedPath += string(filepath.Separator)
|
|
||||||
}
|
|
||||||
|
|
||||||
return cleanedPath
|
|
||||||
}
|
|
||||||
|
|
||||||
// assertsDirectory returns whether the given path is
|
|
||||||
// asserted to be a directory, i.e., the path ends with
|
|
||||||
// a trailing '/' or `/.`, assuming a path separator of `/`.
|
|
||||||
func assertsDirectory(path string) bool {
|
|
||||||
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
// hasTrailingPathSeparator returns whether the given
|
|
||||||
// path ends with the system's path separator character.
|
|
||||||
func hasTrailingPathSeparator(path string) bool {
|
|
||||||
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
|
|
||||||
}
|
|
||||||
|
|
||||||
// specifiesCurrentDir returns whether the given path specifies
|
|
||||||
// a "current directory", i.e., the last path segment is `.`.
|
|
||||||
func specifiesCurrentDir(path string) bool {
|
|
||||||
return filepath.Base(path) == "."
|
|
||||||
}
|
|
||||||
|
|
||||||
// SplitPathDirEntry splits the given path between its directory name and its
|
|
||||||
// basename by first cleaning the path but preserves a trailing "." if the
|
|
||||||
// original path specified the current directory.
|
|
||||||
func SplitPathDirEntry(path string) (dir, base string) {
|
|
||||||
cleanedPath := filepath.Clean(normalizePath(path))
|
|
||||||
|
|
||||||
if specifiesCurrentDir(path) {
|
|
||||||
cleanedPath += string(filepath.Separator) + "."
|
|
||||||
}
|
|
||||||
|
|
||||||
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TarResource archives the resource described by the given CopyInfo to a Tar
|
|
||||||
// archive. A non-nil error is returned if sourcePath does not exist or is
|
|
||||||
// asserted to be a directory but exists as another type of file.
|
|
||||||
//
|
|
||||||
// This function acts as a convenient wrapper around TarWithOptions, which
|
|
||||||
// requires a directory as the source path. TarResource accepts either a
|
|
||||||
// directory or a file path and correctly sets the Tar options.
|
|
||||||
func TarResource(sourceInfo CopyInfo) (content Archive, err error) {
|
|
||||||
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
|
|
||||||
}
|
|
||||||
|
|
||||||
// TarResourceRebase is like TarResource but renames the first path element of
|
|
||||||
// items in the resulting tar archive to match the given rebaseName if not "".
|
|
||||||
func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) {
|
|
||||||
sourcePath = normalizePath(sourcePath)
|
|
||||||
if _, err = os.Lstat(sourcePath); err != nil {
|
|
||||||
// Catches the case where the source does not exist or is not a
|
|
||||||
// directory if asserted to be a directory, as this also causes an
|
|
||||||
// error.
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Separate the source path between it's directory and
|
|
||||||
// the entry in that directory which we are archiving.
|
|
||||||
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
|
|
||||||
|
|
||||||
filter := []string{sourceBase}
|
|
||||||
|
|
||||||
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
|
|
||||||
|
|
||||||
return TarWithOptions(sourceDir, &TarOptions{
|
|
||||||
Compression: Uncompressed,
|
|
||||||
IncludeFiles: filter,
|
|
||||||
IncludeSourceDir: true,
|
|
||||||
RebaseNames: map[string]string{
|
|
||||||
sourceBase: rebaseName,
|
|
||||||
},
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyInfo holds basic info about the source
|
|
||||||
// or destination path of a copy operation.
|
|
||||||
type CopyInfo struct {
|
|
||||||
Path string
|
|
||||||
Exists bool
|
|
||||||
IsDir bool
|
|
||||||
RebaseName string
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyInfoSourcePath stats the given path to create a CopyInfo
|
|
||||||
// struct representing that resource for the source of an archive copy
|
|
||||||
// operation. The given path should be an absolute local path. A source path
|
|
||||||
// has all symlinks evaluated that appear before the last path separator ("/"
|
|
||||||
// on Unix). As it is to be a copy source, the path must exist.
|
|
||||||
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
|
|
||||||
// normalize the file path and then evaluate the symbol link
|
|
||||||
// we will use the target file instead of the symbol link if
|
|
||||||
// followLink is set
|
|
||||||
path = normalizePath(path)
|
|
||||||
|
|
||||||
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
|
|
||||||
if err != nil {
|
|
||||||
return CopyInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
stat, err := os.Lstat(resolvedPath)
|
|
||||||
if err != nil {
|
|
||||||
return CopyInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return CopyInfo{
|
|
||||||
Path: resolvedPath,
|
|
||||||
Exists: true,
|
|
||||||
IsDir: stat.IsDir(),
|
|
||||||
RebaseName: rebaseName,
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyInfoDestinationPath stats the given path to create a CopyInfo
|
|
||||||
// struct representing that resource for the destination of an archive copy
|
|
||||||
// operation. The given path should be an absolute local path.
|
|
||||||
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
|
|
||||||
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
|
|
||||||
path = normalizePath(path)
|
|
||||||
originalPath := path
|
|
||||||
|
|
||||||
stat, err := os.Lstat(path)
|
|
||||||
|
|
||||||
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
|
|
||||||
// The path exists and is not a symlink.
|
|
||||||
return CopyInfo{
|
|
||||||
Path: path,
|
|
||||||
Exists: true,
|
|
||||||
IsDir: stat.IsDir(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// While the path is a symlink.
|
|
||||||
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
|
|
||||||
if n > maxSymlinkIter {
|
|
||||||
// Don't follow symlinks more than this arbitrary number of times.
|
|
||||||
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The path is a symbolic link. We need to evaluate it so that the
|
|
||||||
// destination of the copy operation is the link target and not the
|
|
||||||
// link itself. This is notably different than CopyInfoSourcePath which
|
|
||||||
// only evaluates symlinks before the last appearing path separator.
|
|
||||||
// Also note that it is okay if the last path element is a broken
|
|
||||||
// symlink as the copy operation should create the target.
|
|
||||||
var linkTarget string
|
|
||||||
|
|
||||||
linkTarget, err = os.Readlink(path)
|
|
||||||
if err != nil {
|
|
||||||
return CopyInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if !system.IsAbs(linkTarget) {
|
|
||||||
// Join with the parent directory.
|
|
||||||
dstParent, _ := SplitPathDirEntry(path)
|
|
||||||
linkTarget = filepath.Join(dstParent, linkTarget)
|
|
||||||
}
|
|
||||||
|
|
||||||
path = linkTarget
|
|
||||||
stat, err = os.Lstat(path)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
// It's okay if the destination path doesn't exist. We can still
|
|
||||||
// continue the copy operation if the parent directory exists.
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
return CopyInfo{}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Ensure destination parent dir exists.
|
|
||||||
dstParent, _ := SplitPathDirEntry(path)
|
|
||||||
|
|
||||||
parentDirStat, err := os.Lstat(dstParent)
|
|
||||||
if err != nil {
|
|
||||||
return CopyInfo{}, err
|
|
||||||
}
|
|
||||||
if !parentDirStat.IsDir() {
|
|
||||||
return CopyInfo{}, ErrNotDirectory
|
|
||||||
}
|
|
||||||
|
|
||||||
return CopyInfo{Path: path}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// The path exists after resolving symlinks.
|
|
||||||
return CopyInfo{
|
|
||||||
Path: path,
|
|
||||||
Exists: true,
|
|
||||||
IsDir: stat.IsDir(),
|
|
||||||
}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// PrepareArchiveCopy prepares the given srcContent archive, which should
|
|
||||||
// contain the archived resource described by srcInfo, to the destination
|
|
||||||
// described by dstInfo. Returns the possibly modified content archive along
|
|
||||||
// with the path to the destination directory which it should be extracted to.
|
|
||||||
func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) {
|
|
||||||
// Ensure in platform semantics
|
|
||||||
srcInfo.Path = normalizePath(srcInfo.Path)
|
|
||||||
dstInfo.Path = normalizePath(dstInfo.Path)
|
|
||||||
|
|
||||||
// Separate the destination path between its directory and base
|
|
||||||
// components in case the source archive contents need to be rebased.
|
|
||||||
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
|
|
||||||
_, srcBase := SplitPathDirEntry(srcInfo.Path)
|
|
||||||
|
|
||||||
switch {
|
|
||||||
case dstInfo.Exists && dstInfo.IsDir:
|
|
||||||
// The destination exists as a directory. No alteration
|
|
||||||
// to srcContent is needed as its contents can be
|
|
||||||
// simply extracted to the destination directory.
|
|
||||||
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
|
|
||||||
case dstInfo.Exists && srcInfo.IsDir:
|
|
||||||
// The destination exists as some type of file and the source
|
|
||||||
// content is a directory. This is an error condition since
|
|
||||||
// you cannot copy a directory to an existing file location.
|
|
||||||
return "", nil, ErrCannotCopyDir
|
|
||||||
case dstInfo.Exists:
|
|
||||||
// The destination exists as some type of file and the source content
|
|
||||||
// is also a file. The source content entry will have to be renamed to
|
|
||||||
// have a basename which matches the destination path's basename.
|
|
||||||
if len(srcInfo.RebaseName) != 0 {
|
|
||||||
srcBase = srcInfo.RebaseName
|
|
||||||
}
|
|
||||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
|
||||||
case srcInfo.IsDir:
|
|
||||||
// The destination does not exist and the source content is an archive
|
|
||||||
// of a directory. The archive should be extracted to the parent of
|
|
||||||
// the destination path instead, and when it is, the directory that is
|
|
||||||
// created as a result should take the name of the destination path.
|
|
||||||
// The source content entries will have to be renamed to have a
|
|
||||||
// basename which matches the destination path's basename.
|
|
||||||
if len(srcInfo.RebaseName) != 0 {
|
|
||||||
srcBase = srcInfo.RebaseName
|
|
||||||
}
|
|
||||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
|
||||||
case assertsDirectory(dstInfo.Path):
|
|
||||||
// The destination does not exist and is asserted to be created as a
|
|
||||||
// directory, but the source content is not a directory. This is an
|
|
||||||
// error condition since you cannot create a directory from a file
|
|
||||||
// source.
|
|
||||||
return "", nil, ErrDirNotExists
|
|
||||||
default:
|
|
||||||
// The last remaining case is when the destination does not exist, is
|
|
||||||
// not asserted to be a directory, and the source content is not an
|
|
||||||
// archive of a directory. It this case, the destination file will need
|
|
||||||
// to be created when the archive is extracted and the source content
|
|
||||||
// entry will have to be renamed to have a basename which matches the
|
|
||||||
// destination path's basename.
|
|
||||||
if len(srcInfo.RebaseName) != 0 {
|
|
||||||
srcBase = srcInfo.RebaseName
|
|
||||||
}
|
|
||||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
// RebaseArchiveEntries rewrites the given srcContent archive replacing
|
|
||||||
// an occurrence of oldBase with newBase at the beginning of entry names.
|
|
||||||
func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive {
|
|
||||||
if oldBase == string(os.PathSeparator) {
|
|
||||||
// If oldBase specifies the root directory, use an empty string as
|
|
||||||
// oldBase instead so that newBase doesn't replace the path separator
|
|
||||||
// that all paths will start with.
|
|
||||||
oldBase = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
rebased, w := io.Pipe()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
srcTar := tar.NewReader(srcContent)
|
|
||||||
rebasedTar := tar.NewWriter(w)
|
|
||||||
|
|
||||||
for {
|
|
||||||
hdr, err := srcTar.Next()
|
|
||||||
if err == io.EOF {
|
|
||||||
// Signals end of archive.
|
|
||||||
rebasedTar.Close()
|
|
||||||
w.Close()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
w.CloseWithError(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
|
|
||||||
|
|
||||||
if err = rebasedTar.WriteHeader(hdr); err != nil {
|
|
||||||
w.CloseWithError(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
|
|
||||||
w.CloseWithError(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return rebased
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyResource performs an archive copy from the given source path to the
|
|
||||||
// given destination path. The source path MUST exist and the destination
|
|
||||||
// path's parent directory must exist.
|
|
||||||
func CopyResource(srcPath, dstPath string, followLink bool) error {
|
|
||||||
var (
|
|
||||||
srcInfo CopyInfo
|
|
||||||
err error
|
|
||||||
)
|
|
||||||
|
|
||||||
// Ensure in platform semantics
|
|
||||||
srcPath = normalizePath(srcPath)
|
|
||||||
dstPath = normalizePath(dstPath)
|
|
||||||
|
|
||||||
// Clean the source and destination paths.
|
|
||||||
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
|
|
||||||
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
|
|
||||||
|
|
||||||
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
content, err := TarResource(srcInfo)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer content.Close()
|
|
||||||
|
|
||||||
return CopyTo(content, srcInfo, dstPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyTo handles extracting the given content whose
|
|
||||||
// entries should be sourced from srcInfo to dstPath.
|
|
||||||
func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error {
|
|
||||||
// The destination path need not exist, but CopyInfoDestinationPath will
|
|
||||||
// ensure that at least the parent directory exists.
|
|
||||||
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer copyArchive.Close()
|
|
||||||
|
|
||||||
options := &TarOptions{
|
|
||||||
NoLchown: true,
|
|
||||||
NoOverwriteDirNonDir: true,
|
|
||||||
}
|
|
||||||
|
|
||||||
return Untar(copyArchive, dstDir, options)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ResolveHostSourcePath decides real path need to be copied with parameters such as
|
|
||||||
// whether to follow symbol link or not, if followLink is true, resolvedPath will return
|
|
||||||
// link target of any symbol link file, else it will only resolve symlink of directory
|
|
||||||
// but return symbol link file itself without resolving.
|
|
||||||
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
|
|
||||||
if followLink {
|
|
||||||
resolvedPath, err = filepath.EvalSymlinks(path)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
|
|
||||||
} else {
|
|
||||||
dirPath, basePath := filepath.Split(path)
|
|
||||||
|
|
||||||
// if not follow symbol link, then resolve symbol link of parent dir
|
|
||||||
var resolvedDirPath string
|
|
||||||
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
// resolvedDirPath will have been cleaned (no trailing path separators) so
|
|
||||||
// we can manually join it with the base path element.
|
|
||||||
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
|
|
||||||
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
|
|
||||||
rebaseName = filepath.Base(path)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return resolvedPath, rebaseName, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRebaseName normalizes and compares path and resolvedPath,
|
|
||||||
// return completed resolved path and rebased file name
|
|
||||||
func GetRebaseName(path, resolvedPath string) (string, string) {
|
|
||||||
// linkTarget will have been cleaned (no trailing path separators and dot) so
|
|
||||||
// we can manually join it with them
|
|
||||||
var rebaseName string
|
|
||||||
if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
|
|
||||||
resolvedPath += string(filepath.Separator) + "."
|
|
||||||
}
|
|
||||||
|
|
||||||
if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
|
|
||||||
resolvedPath += string(filepath.Separator)
|
|
||||||
}
|
|
||||||
|
|
||||||
if filepath.Base(path) != filepath.Base(resolvedPath) {
|
|
||||||
// In the case where the path had a trailing separator and a symlink
|
|
||||||
// evaluation has changed the last path component, we will need to
|
|
||||||
// rebase the name in the archive that is being copied to match the
|
|
||||||
// originally requested name.
|
|
||||||
rebaseName = filepath.Base(path)
|
|
||||||
}
|
|
||||||
return resolvedPath, rebaseName
|
|
||||||
}
|
|
||||||
@@ -1,11 +0,0 @@
|
|||||||
// +build !windows
|
|
||||||
|
|
||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
func normalizePath(path string) string {
|
|
||||||
return filepath.ToSlash(path)
|
|
||||||
}
|
|
||||||
@@ -1,9 +0,0 @@
|
|||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"path/filepath"
|
|
||||||
)
|
|
||||||
|
|
||||||
func normalizePath(path string) string {
|
|
||||||
return filepath.FromSlash(path)
|
|
||||||
}
|
|
||||||
@@ -1,279 +0,0 @@
|
|||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"runtime"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be
|
|
||||||
// compressed or uncompressed.
|
|
||||||
// Returns the size in bytes of the contents of the layer.
|
|
||||||
func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) {
|
|
||||||
tr := tar.NewReader(layer)
|
|
||||||
trBuf := pools.BufioReader32KPool.Get(tr)
|
|
||||||
defer pools.BufioReader32KPool.Put(trBuf)
|
|
||||||
|
|
||||||
var dirs []*tar.Header
|
|
||||||
unpackedPaths := make(map[string]struct{})
|
|
||||||
|
|
||||||
if options == nil {
|
|
||||||
options = &TarOptions{}
|
|
||||||
}
|
|
||||||
if options.ExcludePatterns == nil {
|
|
||||||
options.ExcludePatterns = []string{}
|
|
||||||
}
|
|
||||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
aufsTempdir := ""
|
|
||||||
aufsHardlinks := make(map[string]*tar.Header)
|
|
||||||
|
|
||||||
if options == nil {
|
|
||||||
options = &TarOptions{}
|
|
||||||
}
|
|
||||||
// Iterate through the files in the archive.
|
|
||||||
for {
|
|
||||||
hdr, err := tr.Next()
|
|
||||||
if err == io.EOF {
|
|
||||||
// end of tar archive
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
size += hdr.Size
|
|
||||||
|
|
||||||
// Normalize name, for safety and for a simple is-root check
|
|
||||||
hdr.Name = filepath.Clean(hdr.Name)
|
|
||||||
|
|
||||||
// Windows does not support filenames with colons in them. Ignore
|
|
||||||
// these files. This is not a problem though (although it might
|
|
||||||
// appear that it is). Let's suppose a client is running docker pull.
|
|
||||||
// The daemon it points to is Windows. Would it make sense for the
|
|
||||||
// client to be doing a docker pull Ubuntu for example (which has files
|
|
||||||
// with colons in the name under /usr/share/man/man3)? No, absolutely
|
|
||||||
// not as it would really only make sense that they were pulling a
|
|
||||||
// Windows image. However, for development, it is necessary to be able
|
|
||||||
// to pull Linux images which are in the repository.
|
|
||||||
//
|
|
||||||
// TODO Windows. Once the registry is aware of what images are Windows-
|
|
||||||
// specific or Linux-specific, this warning should be changed to an error
|
|
||||||
// to cater for the situation where someone does manage to upload a Linux
|
|
||||||
// image but have it tagged as Windows inadvertently.
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
if strings.Contains(hdr.Name, ":") {
|
|
||||||
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note as these operations are platform specific, so must the slash be.
|
|
||||||
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
|
|
||||||
// Not the root directory, ensure that the parent directory exists.
|
|
||||||
// This happened in some tests where an image had a tarfile without any
|
|
||||||
// parent directories.
|
|
||||||
parent := filepath.Dir(hdr.Name)
|
|
||||||
parentPath := filepath.Join(dest, parent)
|
|
||||||
|
|
||||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
|
||||||
err = system.MkdirAll(parentPath, 0600)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip AUFS metadata dirs
|
|
||||||
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
|
|
||||||
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
|
|
||||||
// We don't want this directory, but we need the files in them so that
|
|
||||||
// such hardlinks can be resolved.
|
|
||||||
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
|
|
||||||
basename := filepath.Base(hdr.Name)
|
|
||||||
aufsHardlinks[basename] = hdr
|
|
||||||
if aufsTempdir == "" {
|
|
||||||
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(aufsTempdir)
|
|
||||||
}
|
|
||||||
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if hdr.Name != WhiteoutOpaqueDir {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
path := filepath.Join(dest, hdr.Name)
|
|
||||||
rel, err := filepath.Rel(dest, path)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Note as these operations are platform specific, so must the slash be.
|
|
||||||
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
|
|
||||||
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
|
|
||||||
}
|
|
||||||
base := filepath.Base(path)
|
|
||||||
|
|
||||||
if strings.HasPrefix(base, WhiteoutPrefix) {
|
|
||||||
dir := filepath.Dir(path)
|
|
||||||
if base == WhiteoutOpaqueDir {
|
|
||||||
_, err := os.Lstat(dir)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
|
|
||||||
if err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
err = nil // parent was deleted
|
|
||||||
}
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
if path == dir {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if _, exists := unpackedPaths[path]; !exists {
|
|
||||||
err := os.RemoveAll(path)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
originalBase := base[len(WhiteoutPrefix):]
|
|
||||||
originalPath := filepath.Join(dir, originalBase)
|
|
||||||
if err := os.RemoveAll(originalPath); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// If path exits we almost always just want to remove and replace it.
|
|
||||||
// The only exception is when it is a directory *and* the file from
|
|
||||||
// the layer is also a directory. Then we want to merge them (i.e.
|
|
||||||
// just apply the metadata from the layer).
|
|
||||||
if fi, err := os.Lstat(path); err == nil {
|
|
||||||
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
|
|
||||||
if err := os.RemoveAll(path); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
trBuf.Reset(tr)
|
|
||||||
srcData := io.Reader(trBuf)
|
|
||||||
srcHdr := hdr
|
|
||||||
|
|
||||||
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
|
|
||||||
// we manually retarget these into the temporary files we extracted them into
|
|
||||||
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
|
|
||||||
linkBasename := filepath.Base(hdr.Linkname)
|
|
||||||
srcHdr = aufsHardlinks[linkBasename]
|
|
||||||
if srcHdr == nil {
|
|
||||||
return 0, fmt.Errorf("Invalid aufs hardlink")
|
|
||||||
}
|
|
||||||
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer tmpFile.Close()
|
|
||||||
srcData = tmpFile
|
|
||||||
}
|
|
||||||
|
|
||||||
// if the options contain a uid & gid maps, convert header uid/gid
|
|
||||||
// entries using the maps such that lchown sets the proper mapped
|
|
||||||
// uid/gid after writing the file. We only perform this mapping if
|
|
||||||
// the file isn't already owned by the remapped root UID or GID, as
|
|
||||||
// that specific uid/gid has no mapping from container -> host, and
|
|
||||||
// those files already have the proper ownership for inside the
|
|
||||||
// container.
|
|
||||||
if srcHdr.Uid != remappedRootUID {
|
|
||||||
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
srcHdr.Uid = xUID
|
|
||||||
}
|
|
||||||
if srcHdr.Gid != remappedRootGID {
|
|
||||||
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
srcHdr.Gid = xGID
|
|
||||||
}
|
|
||||||
if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Directory mtimes must be handled at the end to avoid further
|
|
||||||
// file creation in them to modify the directory mtime
|
|
||||||
if hdr.Typeflag == tar.TypeDir {
|
|
||||||
dirs = append(dirs, hdr)
|
|
||||||
}
|
|
||||||
unpackedPaths[path] = struct{}{}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, hdr := range dirs {
|
|
||||||
path := filepath.Join(dest, hdr.Name)
|
|
||||||
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ApplyLayer parses a diff in the standard layer format from `layer`,
|
|
||||||
// and applies it to the directory `dest`. The stream `layer` can be
|
|
||||||
// compressed or uncompressed.
|
|
||||||
// Returns the size in bytes of the contents of the layer.
|
|
||||||
func ApplyLayer(dest string, layer Reader) (int64, error) {
|
|
||||||
return applyLayerHandler(dest, layer, &TarOptions{}, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ApplyUncompressedLayer parses a diff in the standard layer format from
|
|
||||||
// `layer`, and applies it to the directory `dest`. The stream `layer`
|
|
||||||
// can only be uncompressed.
|
|
||||||
// Returns the size in bytes of the contents of the layer.
|
|
||||||
func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) {
|
|
||||||
return applyLayerHandler(dest, layer, options, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
|
|
||||||
func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) {
|
|
||||||
dest = filepath.Clean(dest)
|
|
||||||
|
|
||||||
// We need to be able to set any perms
|
|
||||||
oldmask, err := system.Umask(0)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
|
|
||||||
|
|
||||||
if decompress {
|
|
||||||
layer, err = DecompressStream(layer)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return UnpackLayer(dest, layer, options)
|
|
||||||
}
|
|
||||||
@@ -1,97 +0,0 @@
|
|||||||
// +build ignore
|
|
||||||
|
|
||||||
// Simple tool to create an archive stream from an old and new directory
|
|
||||||
//
|
|
||||||
// By default it will stream the comparison of two temporary directories with junk files
|
|
||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"flag"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
"path"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
flDebug = flag.Bool("D", false, "debugging output")
|
|
||||||
flNewDir = flag.String("newdir", "", "")
|
|
||||||
flOldDir = flag.String("olddir", "", "")
|
|
||||||
log = logrus.New()
|
|
||||||
)
|
|
||||||
|
|
||||||
func main() {
|
|
||||||
flag.Usage = func() {
|
|
||||||
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
|
|
||||||
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
|
|
||||||
flag.PrintDefaults()
|
|
||||||
}
|
|
||||||
flag.Parse()
|
|
||||||
log.Out = os.Stderr
|
|
||||||
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
|
|
||||||
logrus.SetLevel(logrus.DebugLevel)
|
|
||||||
}
|
|
||||||
var newDir, oldDir string
|
|
||||||
|
|
||||||
if len(*flNewDir) == 0 {
|
|
||||||
var err error
|
|
||||||
newDir, err = ioutil.TempDir("", "docker-test-newDir")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(newDir)
|
|
||||||
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
newDir = *flNewDir
|
|
||||||
}
|
|
||||||
|
|
||||||
if len(*flOldDir) == 0 {
|
|
||||||
oldDir, err := ioutil.TempDir("", "docker-test-oldDir")
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer os.RemoveAll(oldDir)
|
|
||||||
} else {
|
|
||||||
oldDir = *flOldDir
|
|
||||||
}
|
|
||||||
|
|
||||||
changes, err := archive.ChangesDirs(newDir, oldDir)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
a, err := archive.ExportChanges(newDir, changes)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
defer a.Close()
|
|
||||||
|
|
||||||
i, err := io.Copy(os.Stdout, a)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
log.Fatal(err)
|
|
||||||
}
|
|
||||||
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
|
|
||||||
}
|
|
||||||
|
|
||||||
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
|
|
||||||
fileData := []byte("fooo")
|
|
||||||
for n := 0; n < numberOfFiles; n++ {
|
|
||||||
fileName := fmt.Sprintf("file-%d", n)
|
|
||||||
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
if makeLinks {
|
|
||||||
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
totalSize := numberOfFiles * len(fileData)
|
|
||||||
return totalSize, nil
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
|
|
||||||
if time.IsZero() {
|
|
||||||
// Return UTIME_OMIT special value
|
|
||||||
ts.Sec = 0
|
|
||||||
ts.Nsec = ((1 << 30) - 2)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
return syscall.NsecToTimespec(time.UnixNano())
|
|
||||||
}
|
|
||||||
@@ -1,16 +0,0 @@
|
|||||||
// +build !linux
|
|
||||||
|
|
||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"syscall"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
|
|
||||||
nsec := int64(0)
|
|
||||||
if !time.IsZero() {
|
|
||||||
nsec = time.UnixNano()
|
|
||||||
}
|
|
||||||
return syscall.NsecToTimespec(nsec)
|
|
||||||
}
|
|
||||||
@@ -1,23 +0,0 @@
|
|||||||
package archive
|
|
||||||
|
|
||||||
// Whiteouts are files with a special meaning for the layered filesystem.
|
|
||||||
// Docker uses AUFS whiteout files inside exported archives. In other
|
|
||||||
// filesystems these files are generated/handled on tar creation/extraction.
|
|
||||||
|
|
||||||
// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
|
|
||||||
// filename this means that file has been removed from the base layer.
|
|
||||||
const WhiteoutPrefix = ".wh."
|
|
||||||
|
|
||||||
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
|
|
||||||
// for removing an actual file. Normally these files are excluded from exported
|
|
||||||
// archives.
|
|
||||||
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
|
|
||||||
|
|
||||||
// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
|
|
||||||
// layers. Normally these should not go into exported archives and all changed
|
|
||||||
// hardlinks should be copied to the top layer.
|
|
||||||
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
|
|
||||||
|
|
||||||
// WhiteoutOpaqueDir file means directory has been made opaque - meaning
|
|
||||||
// readdir calls to this directory do not follow to lower layers.
|
|
||||||
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
|
|
||||||
@@ -1,59 +0,0 @@
|
|||||||
package archive
|
|
||||||
|
|
||||||
import (
|
|
||||||
"archive/tar"
|
|
||||||
"bytes"
|
|
||||||
"io/ioutil"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Generate generates a new archive from the content provided
|
|
||||||
// as input.
|
|
||||||
//
|
|
||||||
// `files` is a sequence of path/content pairs. A new file is
|
|
||||||
// added to the archive for each pair.
|
|
||||||
// If the last pair is incomplete, the file is created with an
|
|
||||||
// empty content. For example:
|
|
||||||
//
|
|
||||||
// Generate("foo.txt", "hello world", "emptyfile")
|
|
||||||
//
|
|
||||||
// The above call will return an archive with 2 files:
|
|
||||||
// * ./foo.txt with content "hello world"
|
|
||||||
// * ./empty with empty content
|
|
||||||
//
|
|
||||||
// FIXME: stream content instead of buffering
|
|
||||||
// FIXME: specify permissions and other archive metadata
|
|
||||||
func Generate(input ...string) (Archive, error) {
|
|
||||||
files := parseStringPairs(input...)
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
tw := tar.NewWriter(buf)
|
|
||||||
for _, file := range files {
|
|
||||||
name, content := file[0], file[1]
|
|
||||||
hdr := &tar.Header{
|
|
||||||
Name: name,
|
|
||||||
Size: int64(len(content)),
|
|
||||||
}
|
|
||||||
if err := tw.WriteHeader(hdr); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
if _, err := tw.Write([]byte(content)); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := tw.Close(); err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return ioutil.NopCloser(buf), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseStringPairs(input ...string) (output [][2]string) {
|
|
||||||
output = make([][2]string, 0, len(input)/2+1)
|
|
||||||
for i := 0; i < len(input); i += 2 {
|
|
||||||
var pair [2]string
|
|
||||||
pair[0] = input[i]
|
|
||||||
if i+1 < len(input) {
|
|
||||||
pair[1] = input[i+1]
|
|
||||||
}
|
|
||||||
output = append(output, pair)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
@@ -1,279 +0,0 @@
|
|||||||
package fileutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"regexp"
|
|
||||||
"strings"
|
|
||||||
"text/scanner"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// exclusion return true if the specified pattern is an exclusion
|
|
||||||
func exclusion(pattern string) bool {
|
|
||||||
return pattern[0] == '!'
|
|
||||||
}
|
|
||||||
|
|
||||||
// empty return true if the specified pattern is empty
|
|
||||||
func empty(pattern string) bool {
|
|
||||||
return pattern == ""
|
|
||||||
}
|
|
||||||
|
|
||||||
// CleanPatterns takes a slice of patterns returns a new
|
|
||||||
// slice of patterns cleaned with filepath.Clean, stripped
|
|
||||||
// of any empty patterns and lets the caller know whether the
|
|
||||||
// slice contains any exception patterns (prefixed with !).
|
|
||||||
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
|
|
||||||
// Loop over exclusion patterns and:
|
|
||||||
// 1. Clean them up.
|
|
||||||
// 2. Indicate whether we are dealing with any exception rules.
|
|
||||||
// 3. Error if we see a single exclusion marker on it's own (!).
|
|
||||||
cleanedPatterns := []string{}
|
|
||||||
patternDirs := [][]string{}
|
|
||||||
exceptions := false
|
|
||||||
for _, pattern := range patterns {
|
|
||||||
// Eliminate leading and trailing whitespace.
|
|
||||||
pattern = strings.TrimSpace(pattern)
|
|
||||||
if empty(pattern) {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if exclusion(pattern) {
|
|
||||||
if len(pattern) == 1 {
|
|
||||||
return nil, nil, false, errors.New("Illegal exclusion pattern: !")
|
|
||||||
}
|
|
||||||
exceptions = true
|
|
||||||
}
|
|
||||||
pattern = filepath.Clean(pattern)
|
|
||||||
cleanedPatterns = append(cleanedPatterns, pattern)
|
|
||||||
if exclusion(pattern) {
|
|
||||||
pattern = pattern[1:]
|
|
||||||
}
|
|
||||||
patternDirs = append(patternDirs, strings.Split(pattern, "/"))
|
|
||||||
}
|
|
||||||
|
|
||||||
return cleanedPatterns, patternDirs, exceptions, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Matches returns true if file matches any of the patterns
|
|
||||||
// and isn't excluded by any of the subsequent patterns.
|
|
||||||
func Matches(file string, patterns []string) (bool, error) {
|
|
||||||
file = filepath.Clean(file)
|
|
||||||
|
|
||||||
if file == "." {
|
|
||||||
// Don't let them exclude everything, kind of silly.
|
|
||||||
return false, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
patterns, patDirs, _, err := CleanPatterns(patterns)
|
|
||||||
if err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return OptimizedMatches(file, patterns, patDirs)
|
|
||||||
}
|
|
||||||
|
|
||||||
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
|
|
||||||
// It will assume that the inputs have been preprocessed and therefore the function
|
|
||||||
// doesn't need to do as much error checking and clean-up. This was done to avoid
|
|
||||||
// repeating these steps on each file being checked during the archive process.
|
|
||||||
// The more generic fileutils.Matches() can't make these assumptions.
|
|
||||||
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
|
|
||||||
matched := false
|
|
||||||
parentPath := filepath.Dir(file)
|
|
||||||
parentPathDirs := strings.Split(parentPath, "/")
|
|
||||||
|
|
||||||
for i, pattern := range patterns {
|
|
||||||
negative := false
|
|
||||||
|
|
||||||
if exclusion(pattern) {
|
|
||||||
negative = true
|
|
||||||
pattern = pattern[1:]
|
|
||||||
}
|
|
||||||
|
|
||||||
match, err := regexpMatch(pattern, file)
|
|
||||||
if err != nil {
|
|
||||||
return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if !match && parentPath != "." {
|
|
||||||
// Check to see if the pattern matches one of our parent dirs.
|
|
||||||
if len(patDirs[i]) <= len(parentPathDirs) {
|
|
||||||
match, _ = regexpMatch(strings.Join(patDirs[i], "/"),
|
|
||||||
strings.Join(parentPathDirs[:len(patDirs[i])], "/"))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if match {
|
|
||||||
matched = !negative
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if matched {
|
|
||||||
logrus.Debugf("Skipping excluded path: %s", file)
|
|
||||||
}
|
|
||||||
|
|
||||||
return matched, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// regexpMatch tries to match the logic of filepath.Match but
|
|
||||||
// does so using regexp logic. We do this so that we can expand the
|
|
||||||
// wildcard set to include other things, like "**" to mean any number
|
|
||||||
// of directories. This means that we should be backwards compatible
|
|
||||||
// with filepath.Match(). We'll end up supporting more stuff, due to
|
|
||||||
// the fact that we're using regexp, but that's ok - it does no harm.
|
|
||||||
func regexpMatch(pattern, path string) (bool, error) {
|
|
||||||
regStr := "^"
|
|
||||||
|
|
||||||
// Do some syntax checking on the pattern.
|
|
||||||
// filepath's Match() has some really weird rules that are inconsistent
|
|
||||||
// so instead of trying to dup their logic, just call Match() for its
|
|
||||||
// error state and if there is an error in the pattern return it.
|
|
||||||
// If this becomes an issue we can remove this since its really only
|
|
||||||
// needed in the error (syntax) case - which isn't really critical.
|
|
||||||
if _, err := filepath.Match(pattern, path); err != nil {
|
|
||||||
return false, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Go through the pattern and convert it to a regexp.
|
|
||||||
// We use a scanner so we can support utf-8 chars.
|
|
||||||
var scan scanner.Scanner
|
|
||||||
scan.Init(strings.NewReader(pattern))
|
|
||||||
|
|
||||||
sl := string(os.PathSeparator)
|
|
||||||
escSL := sl
|
|
||||||
if sl == `\` {
|
|
||||||
escSL += `\`
|
|
||||||
}
|
|
||||||
|
|
||||||
for scan.Peek() != scanner.EOF {
|
|
||||||
ch := scan.Next()
|
|
||||||
|
|
||||||
if ch == '*' {
|
|
||||||
if scan.Peek() == '*' {
|
|
||||||
// is some flavor of "**"
|
|
||||||
scan.Next()
|
|
||||||
|
|
||||||
if scan.Peek() == scanner.EOF {
|
|
||||||
// is "**EOF" - to align with .gitignore just accept all
|
|
||||||
regStr += ".*"
|
|
||||||
} else {
|
|
||||||
// is "**"
|
|
||||||
regStr += "((.*" + escSL + ")|([^" + escSL + "]*))"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Treat **/ as ** so eat the "/"
|
|
||||||
if string(scan.Peek()) == sl {
|
|
||||||
scan.Next()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// is "*" so map it to anything but "/"
|
|
||||||
regStr += "[^" + escSL + "]*"
|
|
||||||
}
|
|
||||||
} else if ch == '?' {
|
|
||||||
// "?" is any char except "/"
|
|
||||||
regStr += "[^" + escSL + "]"
|
|
||||||
} else if strings.Index(".$", string(ch)) != -1 {
|
|
||||||
// Escape some regexp special chars that have no meaning
|
|
||||||
// in golang's filepath.Match
|
|
||||||
regStr += `\` + string(ch)
|
|
||||||
} else if ch == '\\' {
|
|
||||||
// escape next char. Note that a trailing \ in the pattern
|
|
||||||
// will be left alone (but need to escape it)
|
|
||||||
if sl == `\` {
|
|
||||||
// On windows map "\" to "\\", meaning an escaped backslash,
|
|
||||||
// and then just continue because filepath.Match on
|
|
||||||
// Windows doesn't allow escaping at all
|
|
||||||
regStr += escSL
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if scan.Peek() != scanner.EOF {
|
|
||||||
regStr += `\` + string(scan.Next())
|
|
||||||
} else {
|
|
||||||
regStr += `\`
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
regStr += string(ch)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
regStr += "$"
|
|
||||||
|
|
||||||
res, err := regexp.MatchString(regStr, path)
|
|
||||||
|
|
||||||
// Map regexp's error to filepath's so no one knows we're not using filepath
|
|
||||||
if err != nil {
|
|
||||||
err = filepath.ErrBadPattern
|
|
||||||
}
|
|
||||||
|
|
||||||
return res, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// CopyFile copies from src to dst until either EOF is reached
|
|
||||||
// on src or an error occurs. It verifies src exists and remove
|
|
||||||
// the dst if it exists.
|
|
||||||
func CopyFile(src, dst string) (int64, error) {
|
|
||||||
cleanSrc := filepath.Clean(src)
|
|
||||||
cleanDst := filepath.Clean(dst)
|
|
||||||
if cleanSrc == cleanDst {
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
sf, err := os.Open(cleanSrc)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer sf.Close()
|
|
||||||
if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
df, err := os.Create(cleanDst)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
defer df.Close()
|
|
||||||
return io.Copy(df, sf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ReadSymlinkedDirectory returns the target directory of a symlink.
|
|
||||||
// The target of the symbolic link may not be a file.
|
|
||||||
func ReadSymlinkedDirectory(path string) (string, error) {
|
|
||||||
var realPath string
|
|
||||||
var err error
|
|
||||||
if realPath, err = filepath.Abs(path); err != nil {
|
|
||||||
return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
|
|
||||||
}
|
|
||||||
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
|
|
||||||
return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
|
|
||||||
}
|
|
||||||
realPathInfo, err := os.Stat(realPath)
|
|
||||||
if err != nil {
|
|
||||||
return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
|
|
||||||
}
|
|
||||||
if !realPathInfo.Mode().IsDir() {
|
|
||||||
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
|
|
||||||
}
|
|
||||||
return realPath, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateIfNotExists creates a file or a directory only if it does not already exist.
|
|
||||||
func CreateIfNotExists(path string, isDir bool) error {
|
|
||||||
if _, err := os.Stat(path); err != nil {
|
|
||||||
if os.IsNotExist(err) {
|
|
||||||
if isDir {
|
|
||||||
return os.MkdirAll(path, 0755)
|
|
||||||
}
|
|
||||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(path, os.O_CREATE, 0755)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
f.Close()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
// +build linux freebsd
|
|
||||||
|
|
||||||
package fileutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io/ioutil"
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus"
|
|
||||||
)
|
|
||||||
|
|
||||||
// GetTotalUsedFds Returns the number of used File Descriptors by
|
|
||||||
// reading it via /proc filesystem.
|
|
||||||
func GetTotalUsedFds() int {
|
|
||||||
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
|
|
||||||
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
|
|
||||||
} else {
|
|
||||||
return len(fds)
|
|
||||||
}
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
@@ -1,7 +0,0 @@
|
|||||||
package fileutils
|
|
||||||
|
|
||||||
// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
|
|
||||||
// on Windows.
|
|
||||||
func GetTotalUsedFds() int {
|
|
||||||
return -1
|
|
||||||
}
|
|
||||||
@@ -1,39 +0,0 @@
|
|||||||
package homedir
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"runtime"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Key returns the env var name for the user's home dir based on
|
|
||||||
// the platform being run on
|
|
||||||
func Key() string {
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return "USERPROFILE"
|
|
||||||
}
|
|
||||||
return "HOME"
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get returns the home directory of the current user with the help of
|
|
||||||
// environment variables depending on the target operating system.
|
|
||||||
// Returned path should be used with "path/filepath" to form new paths.
|
|
||||||
func Get() string {
|
|
||||||
home := os.Getenv(Key())
|
|
||||||
if home == "" && runtime.GOOS != "windows" {
|
|
||||||
if u, err := user.CurrentUser(); err == nil {
|
|
||||||
return u.Home
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return home
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetShortcutString returns the string that is shortcut to user's home directory
|
|
||||||
// in the native shell of the platform running on.
|
|
||||||
func GetShortcutString() string {
|
|
||||||
if runtime.GOOS == "windows" {
|
|
||||||
return "%USERPROFILE%" // be careful while using in format functions
|
|
||||||
}
|
|
||||||
return "~"
|
|
||||||
}
|
|
||||||
@@ -1,195 +0,0 @@
|
|||||||
package idtools
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
// IDMap contains a single entry for user namespace range remapping. An array
|
|
||||||
// of IDMap entries represents the structure that will be provided to the Linux
|
|
||||||
// kernel for creating a user namespace.
|
|
||||||
type IDMap struct {
|
|
||||||
ContainerID int `json:"container_id"`
|
|
||||||
HostID int `json:"host_id"`
|
|
||||||
Size int `json:"size"`
|
|
||||||
}
|
|
||||||
|
|
||||||
type subIDRange struct {
|
|
||||||
Start int
|
|
||||||
Length int
|
|
||||||
}
|
|
||||||
|
|
||||||
type ranges []subIDRange
|
|
||||||
|
|
||||||
func (e ranges) Len() int { return len(e) }
|
|
||||||
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
|
|
||||||
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
|
|
||||||
|
|
||||||
const (
|
|
||||||
subuidFileName string = "/etc/subuid"
|
|
||||||
subgidFileName string = "/etc/subgid"
|
|
||||||
)
|
|
||||||
|
|
||||||
// MkdirAllAs creates a directory (include any along the path) and then modifies
|
|
||||||
// ownership to the requested uid/gid. If the directory already exists, this
|
|
||||||
// function will still change ownership to the requested uid/gid pair.
|
|
||||||
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
|
||||||
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MkdirAllNewAs creates a directory (include any along the path) and then modifies
|
|
||||||
// ownership ONLY of newly created directories to the requested uid/gid. If the
|
|
||||||
// directories along the path exist, no change of ownership will be performed
|
|
||||||
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
|
||||||
return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
|
|
||||||
// If the directory already exists, this function still changes ownership
|
|
||||||
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
|
||||||
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
|
|
||||||
// If the maps are empty, then the root uid/gid will default to "real" 0/0
|
|
||||||
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
|
|
||||||
var uid, gid int
|
|
||||||
|
|
||||||
if uidMap != nil {
|
|
||||||
xUID, err := ToHost(0, uidMap)
|
|
||||||
if err != nil {
|
|
||||||
return -1, -1, err
|
|
||||||
}
|
|
||||||
uid = xUID
|
|
||||||
}
|
|
||||||
if gidMap != nil {
|
|
||||||
xGID, err := ToHost(0, gidMap)
|
|
||||||
if err != nil {
|
|
||||||
return -1, -1, err
|
|
||||||
}
|
|
||||||
gid = xGID
|
|
||||||
}
|
|
||||||
return uid, gid, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToContainer takes an id mapping, and uses it to translate a
|
|
||||||
// host ID to the remapped ID. If no map is provided, then the translation
|
|
||||||
// assumes a 1-to-1 mapping and returns the passed in id
|
|
||||||
func ToContainer(hostID int, idMap []IDMap) (int, error) {
|
|
||||||
if idMap == nil {
|
|
||||||
return hostID, nil
|
|
||||||
}
|
|
||||||
for _, m := range idMap {
|
|
||||||
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
|
|
||||||
contID := m.ContainerID + (hostID - m.HostID)
|
|
||||||
return contID, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// ToHost takes an id mapping and a remapped ID, and translates the
|
|
||||||
// ID to the mapped host ID. If no map is provided, then the translation
|
|
||||||
// assumes a 1-to-1 mapping and returns the passed in id #
|
|
||||||
func ToHost(contID int, idMap []IDMap) (int, error) {
|
|
||||||
if idMap == nil {
|
|
||||||
return contID, nil
|
|
||||||
}
|
|
||||||
for _, m := range idMap {
|
|
||||||
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
|
|
||||||
hostID := m.HostID + (contID - m.ContainerID)
|
|
||||||
return hostID, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
|
|
||||||
}
|
|
||||||
|
|
||||||
// CreateIDMappings takes a requested user and group name and
|
|
||||||
// using the data from /etc/sub{uid,gid} ranges, creates the
|
|
||||||
// proper uid and gid remapping ranges for that user/group pair
|
|
||||||
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
|
|
||||||
subuidRanges, err := parseSubuid(username)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
subgidRanges, err := parseSubgid(groupname)
|
|
||||||
if err != nil {
|
|
||||||
return nil, nil, err
|
|
||||||
}
|
|
||||||
if len(subuidRanges) == 0 {
|
|
||||||
return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
|
|
||||||
}
|
|
||||||
if len(subgidRanges) == 0 {
|
|
||||||
return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
|
|
||||||
}
|
|
||||||
|
|
||||||
return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func createIDMap(subidRanges ranges) []IDMap {
|
|
||||||
idMap := []IDMap{}
|
|
||||||
|
|
||||||
// sort the ranges by lowest ID first
|
|
||||||
sort.Sort(subidRanges)
|
|
||||||
containerID := 0
|
|
||||||
for _, idrange := range subidRanges {
|
|
||||||
idMap = append(idMap, IDMap{
|
|
||||||
ContainerID: containerID,
|
|
||||||
HostID: idrange.Start,
|
|
||||||
Size: idrange.Length,
|
|
||||||
})
|
|
||||||
containerID = containerID + idrange.Length
|
|
||||||
}
|
|
||||||
return idMap
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseSubuid(username string) (ranges, error) {
|
|
||||||
return parseSubidFile(subuidFileName, username)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseSubgid(username string) (ranges, error) {
|
|
||||||
return parseSubidFile(subgidFileName, username)
|
|
||||||
}
|
|
||||||
|
|
||||||
func parseSubidFile(path, username string) (ranges, error) {
|
|
||||||
var rangeList ranges
|
|
||||||
|
|
||||||
subidFile, err := os.Open(path)
|
|
||||||
if err != nil {
|
|
||||||
return rangeList, err
|
|
||||||
}
|
|
||||||
defer subidFile.Close()
|
|
||||||
|
|
||||||
s := bufio.NewScanner(subidFile)
|
|
||||||
for s.Scan() {
|
|
||||||
if err := s.Err(); err != nil {
|
|
||||||
return rangeList, err
|
|
||||||
}
|
|
||||||
|
|
||||||
text := strings.TrimSpace(s.Text())
|
|
||||||
if text == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
parts := strings.Split(text, ":")
|
|
||||||
if len(parts) != 3 {
|
|
||||||
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
|
|
||||||
}
|
|
||||||
if parts[0] == username {
|
|
||||||
// return the first entry for a user; ignores potential for multiple ranges per user
|
|
||||||
startid, err := strconv.Atoi(parts[1])
|
|
||||||
if err != nil {
|
|
||||||
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
|
||||||
}
|
|
||||||
length, err := strconv.Atoi(parts[2])
|
|
||||||
if err != nil {
|
|
||||||
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
|
|
||||||
}
|
|
||||||
rangeList = append(rangeList, subIDRange{startid, length})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return rangeList, nil
|
|
||||||
}
|
|
||||||
@@ -1,60 +0,0 @@
|
|||||||
// +build !windows
|
|
||||||
|
|
||||||
package idtools
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
|
|
||||||
// make an array containing the original path asked for, plus (for mkAll == true)
|
|
||||||
// all path components leading up to the complete path that don't exist before we MkdirAll
|
|
||||||
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
|
|
||||||
// chown the full directory path if it exists
|
|
||||||
var paths []string
|
|
||||||
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
|
|
||||||
paths = []string{path}
|
|
||||||
} else if err == nil && chownExisting {
|
|
||||||
if err := os.Chown(path, ownerUID, ownerGID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
// short-circuit--we were called with an existing directory and chown was requested
|
|
||||||
return nil
|
|
||||||
} else if err == nil {
|
|
||||||
// nothing to do; directory path fully exists already and chown was NOT requested
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if mkAll {
|
|
||||||
// walk back to "/" looking for directories which do not exist
|
|
||||||
// and add them to the paths array for chown after creation
|
|
||||||
dirPath := path
|
|
||||||
for {
|
|
||||||
dirPath = filepath.Dir(dirPath)
|
|
||||||
if dirPath == "/" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
|
|
||||||
paths = append(paths, dirPath)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
// even if it existed, we will chown the requested path + any subpaths that
|
|
||||||
// didn't exist when we called MkdirAll
|
|
||||||
for _, pathComponent := range paths {
|
|
||||||
if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
// +build windows
|
|
||||||
|
|
||||||
package idtools
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Platforms such as Windows do not support the UID/GID concept. So make this
|
|
||||||
// just a wrapper around system.MkdirAll.
|
|
||||||
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
|
|
||||||
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,155 +0,0 @@
|
|||||||
package idtools
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"os/exec"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"syscall"
|
|
||||||
)
|
|
||||||
|
|
||||||
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
|
|
||||||
// Linux distribution commands:
|
|
||||||
// adduser --uid <id> --shell /bin/login --no-create-home --disabled-login --ingroup <groupname> <username>
|
|
||||||
// useradd -M -u <id> -s /bin/nologin -N -g <groupname> <username>
|
|
||||||
// addgroup --gid <id> <groupname>
|
|
||||||
// groupadd -g <id> <groupname>
|
|
||||||
|
|
||||||
const baseUID int = 10000
|
|
||||||
const baseGID int = 10000
|
|
||||||
const idMAX int = 65534
|
|
||||||
|
|
||||||
var (
|
|
||||||
userCommand string
|
|
||||||
groupCommand string
|
|
||||||
|
|
||||||
cmdTemplates = map[string]string{
|
|
||||||
"adduser": "--uid %d --shell /bin/false --no-create-home --disabled-login --ingroup %s %s",
|
|
||||||
"useradd": "-M -u %d -s /bin/false -N -g %s %s",
|
|
||||||
"addgroup": "--gid %d %s",
|
|
||||||
"groupadd": "-g %d %s",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
// set up which commands are used for adding users/groups dependent on distro
|
|
||||||
if _, err := resolveBinary("adduser"); err == nil {
|
|
||||||
userCommand = "adduser"
|
|
||||||
} else if _, err := resolveBinary("useradd"); err == nil {
|
|
||||||
userCommand = "useradd"
|
|
||||||
}
|
|
||||||
if _, err := resolveBinary("addgroup"); err == nil {
|
|
||||||
groupCommand = "addgroup"
|
|
||||||
} else if _, err := resolveBinary("groupadd"); err == nil {
|
|
||||||
groupCommand = "groupadd"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func resolveBinary(binname string) (string, error) {
|
|
||||||
binaryPath, err := exec.LookPath(binname)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
//only return no error if the final resolved binary basename
|
|
||||||
//matches what was searched for
|
|
||||||
if filepath.Base(resolvedPath) == binname {
|
|
||||||
return resolvedPath, nil
|
|
||||||
}
|
|
||||||
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
|
|
||||||
// and calls the appropriate helper function to add the group and then
|
|
||||||
// the user to the group in /etc/group and /etc/passwd respectively.
|
|
||||||
// This new user's /etc/sub{uid,gid} ranges will be used for user namespace
|
|
||||||
// mapping ranges in containers.
|
|
||||||
func AddNamespaceRangesUser(name string) (int, int, error) {
|
|
||||||
// Find unused uid, gid pair
|
|
||||||
uid, err := findUnusedUID(baseUID)
|
|
||||||
if err != nil {
|
|
||||||
return -1, -1, fmt.Errorf("Unable to find unused UID: %v", err)
|
|
||||||
}
|
|
||||||
gid, err := findUnusedGID(baseGID)
|
|
||||||
if err != nil {
|
|
||||||
return -1, -1, fmt.Errorf("Unable to find unused GID: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// First add the group that we will use
|
|
||||||
if err := addGroup(name, gid); err != nil {
|
|
||||||
return -1, -1, fmt.Errorf("Error adding group %q: %v", name, err)
|
|
||||||
}
|
|
||||||
// Add the user as a member of the group
|
|
||||||
if err := addUser(name, uid, name); err != nil {
|
|
||||||
return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
|
|
||||||
}
|
|
||||||
return uid, gid, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func addUser(userName string, uid int, groupName string) error {
|
|
||||||
|
|
||||||
if userCommand == "" {
|
|
||||||
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
|
|
||||||
}
|
|
||||||
args := fmt.Sprintf(cmdTemplates[userCommand], uid, groupName, userName)
|
|
||||||
return execAddCmd(userCommand, args)
|
|
||||||
}
|
|
||||||
|
|
||||||
func addGroup(groupName string, gid int) error {
|
|
||||||
|
|
||||||
if groupCommand == "" {
|
|
||||||
return fmt.Errorf("Cannot add group; no groupadd/addgroup binary found")
|
|
||||||
}
|
|
||||||
args := fmt.Sprintf(cmdTemplates[groupCommand], gid, groupName)
|
|
||||||
// only error out if the error isn't that the group already exists
|
|
||||||
// if the group exists then our needs are already met
|
|
||||||
if err := execAddCmd(groupCommand, args); err != nil && !strings.Contains(err.Error(), "already exists") {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func execAddCmd(cmd, args string) error {
|
|
||||||
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
|
|
||||||
out, err := execCmd.CombinedOutput()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("Failed to add user/group with error: %v; output: %q", err, string(out))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func findUnusedUID(startUID int) (int, error) {
|
|
||||||
return findUnused("passwd", startUID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func findUnusedGID(startGID int) (int, error) {
|
|
||||||
return findUnused("group", startGID)
|
|
||||||
}
|
|
||||||
|
|
||||||
func findUnused(file string, id int) (int, error) {
|
|
||||||
for {
|
|
||||||
cmdStr := fmt.Sprintf("cat /etc/%s | cut -d: -f3 | grep '^%d$'", file, id)
|
|
||||||
cmd := exec.Command("sh", "-c", cmdStr)
|
|
||||||
if err := cmd.Run(); err != nil {
|
|
||||||
// if a non-zero return code occurs, then we know the ID was not found
|
|
||||||
// and is usable
|
|
||||||
if exiterr, ok := err.(*exec.ExitError); ok {
|
|
||||||
// The program has exited with an exit code != 0
|
|
||||||
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
|
|
||||||
if status.ExitStatus() == 1 {
|
|
||||||
//no match, we can use this ID
|
|
||||||
return id, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return -1, fmt.Errorf("Error looking in /etc/%s for unused ID: %v", file, err)
|
|
||||||
}
|
|
||||||
id++
|
|
||||||
if id > idMAX {
|
|
||||||
return -1, fmt.Errorf("Maximum id in %q reached with finding unused numeric ID", file)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,12 +0,0 @@
|
|||||||
// +build !linux
|
|
||||||
|
|
||||||
package idtools
|
|
||||||
|
|
||||||
import "fmt"
|
|
||||||
|
|
||||||
// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair
|
|
||||||
// and calls the appropriate helper function to add the group and then
|
|
||||||
// the user to the group in /etc/group and /etc/passwd respectively.
|
|
||||||
func AddNamespaceRangesUser(name string) (int, int, error) {
|
|
||||||
return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
|
|
||||||
}
|
|
||||||
@@ -1,152 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// maxCap is the highest capacity to use in byte slices that buffer data.
|
|
||||||
const maxCap = 1e6
|
|
||||||
|
|
||||||
// blockThreshold is the minimum number of bytes in the buffer which will cause
|
|
||||||
// a write to BytesPipe to block when allocating a new slice.
|
|
||||||
const blockThreshold = 1e6
|
|
||||||
|
|
||||||
// ErrClosed is returned when Write is called on a closed BytesPipe.
|
|
||||||
var ErrClosed = errors.New("write to closed BytesPipe")
|
|
||||||
|
|
||||||
// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue).
|
|
||||||
// All written data may be read at most once. Also, BytesPipe allocates
|
|
||||||
// and releases new byte slices to adjust to current needs, so the buffer
|
|
||||||
// won't be overgrown after peak loads.
|
|
||||||
type BytesPipe struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
wait *sync.Cond
|
|
||||||
buf [][]byte // slice of byte-slices of buffered data
|
|
||||||
lastRead int // index in the first slice to a read point
|
|
||||||
bufLen int // length of data buffered over the slices
|
|
||||||
closeErr error // error to return from next Read. set to nil if not closed.
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewBytesPipe creates new BytesPipe, initialized by specified slice.
|
|
||||||
// If buf is nil, then it will be initialized with slice which cap is 64.
|
|
||||||
// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf).
|
|
||||||
func NewBytesPipe(buf []byte) *BytesPipe {
|
|
||||||
if cap(buf) == 0 {
|
|
||||||
buf = make([]byte, 0, 64)
|
|
||||||
}
|
|
||||||
bp := &BytesPipe{
|
|
||||||
buf: [][]byte{buf[:0]},
|
|
||||||
}
|
|
||||||
bp.wait = sync.NewCond(&bp.mu)
|
|
||||||
return bp
|
|
||||||
}
|
|
||||||
|
|
||||||
// Write writes p to BytesPipe.
|
|
||||||
// It can allocate new []byte slices in a process of writing.
|
|
||||||
func (bp *BytesPipe) Write(p []byte) (int, error) {
|
|
||||||
bp.mu.Lock()
|
|
||||||
defer bp.mu.Unlock()
|
|
||||||
written := 0
|
|
||||||
for {
|
|
||||||
if bp.closeErr != nil {
|
|
||||||
return written, ErrClosed
|
|
||||||
}
|
|
||||||
// write data to the last buffer
|
|
||||||
b := bp.buf[len(bp.buf)-1]
|
|
||||||
// copy data to the current empty allocated area
|
|
||||||
n := copy(b[len(b):cap(b)], p)
|
|
||||||
// increment buffered data length
|
|
||||||
bp.bufLen += n
|
|
||||||
// include written data in last buffer
|
|
||||||
bp.buf[len(bp.buf)-1] = b[:len(b)+n]
|
|
||||||
|
|
||||||
written += n
|
|
||||||
|
|
||||||
// if there was enough room to write all then break
|
|
||||||
if len(p) == n {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
// more data: write to the next slice
|
|
||||||
p = p[n:]
|
|
||||||
|
|
||||||
// block if too much data is still in the buffer
|
|
||||||
for bp.bufLen >= blockThreshold {
|
|
||||||
bp.wait.Wait()
|
|
||||||
}
|
|
||||||
|
|
||||||
// allocate slice that has twice the size of the last unless maximum reached
|
|
||||||
nextCap := 2 * cap(bp.buf[len(bp.buf)-1])
|
|
||||||
if nextCap > maxCap {
|
|
||||||
nextCap = maxCap
|
|
||||||
}
|
|
||||||
// add new byte slice to the buffers slice and continue writing
|
|
||||||
bp.buf = append(bp.buf, make([]byte, 0, nextCap))
|
|
||||||
}
|
|
||||||
bp.wait.Broadcast()
|
|
||||||
return written, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// CloseWithError causes further reads from a BytesPipe to return immediately.
|
|
||||||
func (bp *BytesPipe) CloseWithError(err error) error {
|
|
||||||
bp.mu.Lock()
|
|
||||||
if err != nil {
|
|
||||||
bp.closeErr = err
|
|
||||||
} else {
|
|
||||||
bp.closeErr = io.EOF
|
|
||||||
}
|
|
||||||
bp.wait.Broadcast()
|
|
||||||
bp.mu.Unlock()
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close causes further reads from a BytesPipe to return immediately.
|
|
||||||
func (bp *BytesPipe) Close() error {
|
|
||||||
return bp.CloseWithError(nil)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (bp *BytesPipe) len() int {
|
|
||||||
return bp.bufLen - bp.lastRead
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read reads bytes from BytesPipe.
|
|
||||||
// Data could be read only once.
|
|
||||||
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
|
|
||||||
bp.mu.Lock()
|
|
||||||
defer bp.mu.Unlock()
|
|
||||||
if bp.len() == 0 {
|
|
||||||
if bp.closeErr != nil {
|
|
||||||
return 0, bp.closeErr
|
|
||||||
}
|
|
||||||
bp.wait.Wait()
|
|
||||||
if bp.len() == 0 && bp.closeErr != nil {
|
|
||||||
return 0, bp.closeErr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
for {
|
|
||||||
read := copy(p, bp.buf[0][bp.lastRead:])
|
|
||||||
n += read
|
|
||||||
bp.lastRead += read
|
|
||||||
if bp.len() == 0 {
|
|
||||||
// we have read everything. reset to the beginning.
|
|
||||||
bp.lastRead = 0
|
|
||||||
bp.bufLen -= len(bp.buf[0])
|
|
||||||
bp.buf[0] = bp.buf[0][:0]
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// break if everything was read
|
|
||||||
if len(p) == read {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
// more buffered data and more asked. read from next slice.
|
|
||||||
p = p[read:]
|
|
||||||
bp.lastRead = 0
|
|
||||||
bp.bufLen -= len(bp.buf[0])
|
|
||||||
bp.buf[0] = nil // throw away old slice
|
|
||||||
bp.buf = bp.buf[1:] // switch to next
|
|
||||||
}
|
|
||||||
bp.wait.Broadcast()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
@@ -1,22 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
)
|
|
||||||
|
|
||||||
// FprintfIfNotEmpty prints the string value if it's not empty
|
|
||||||
func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) {
|
|
||||||
if value != "" {
|
|
||||||
return fmt.Fprintf(w, format, value)
|
|
||||||
}
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// FprintfIfTrue prints the boolean value if it's true
|
|
||||||
func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) {
|
|
||||||
if ok {
|
|
||||||
return fmt.Fprintf(w, format, ok)
|
|
||||||
}
|
|
||||||
return 0, nil
|
|
||||||
}
|
|
||||||
@@ -1,226 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"io"
|
|
||||||
"os"
|
|
||||||
)
|
|
||||||
|
|
||||||
type pos struct {
|
|
||||||
idx int
|
|
||||||
offset int64
|
|
||||||
}
|
|
||||||
|
|
||||||
type multiReadSeeker struct {
|
|
||||||
readers []io.ReadSeeker
|
|
||||||
pos *pos
|
|
||||||
posIdx map[io.ReadSeeker]int
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) {
|
|
||||||
var tmpOffset int64
|
|
||||||
switch whence {
|
|
||||||
case os.SEEK_SET:
|
|
||||||
for i, rdr := range r.readers {
|
|
||||||
// get size of the current reader
|
|
||||||
s, err := rdr.Seek(0, os.SEEK_END)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
if offset > tmpOffset+s {
|
|
||||||
if i == len(r.readers)-1 {
|
|
||||||
rdrOffset := s + (offset - tmpOffset)
|
|
||||||
if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
r.pos = &pos{i, rdrOffset}
|
|
||||||
return offset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
tmpOffset += s
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
rdrOffset := offset - tmpOffset
|
|
||||||
idx := i
|
|
||||||
|
|
||||||
rdr.Seek(rdrOffset, os.SEEK_SET)
|
|
||||||
// make sure all following readers are at 0
|
|
||||||
for _, rdr := range r.readers[i+1:] {
|
|
||||||
rdr.Seek(0, os.SEEK_SET)
|
|
||||||
}
|
|
||||||
|
|
||||||
if rdrOffset == s && i != len(r.readers)-1 {
|
|
||||||
idx++
|
|
||||||
rdrOffset = 0
|
|
||||||
}
|
|
||||||
r.pos = &pos{idx, rdrOffset}
|
|
||||||
return offset, nil
|
|
||||||
}
|
|
||||||
case os.SEEK_END:
|
|
||||||
for _, rdr := range r.readers {
|
|
||||||
s, err := rdr.Seek(0, os.SEEK_END)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
tmpOffset += s
|
|
||||||
}
|
|
||||||
r.Seek(tmpOffset+offset, os.SEEK_SET)
|
|
||||||
return tmpOffset + offset, nil
|
|
||||||
case os.SEEK_CUR:
|
|
||||||
if r.pos == nil {
|
|
||||||
return r.Seek(offset, os.SEEK_SET)
|
|
||||||
}
|
|
||||||
// Just return the current offset
|
|
||||||
if offset == 0 {
|
|
||||||
return r.getCurOffset()
|
|
||||||
}
|
|
||||||
|
|
||||||
curOffset, err := r.getCurOffset()
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
r.pos = &pos{r.posIdx[rdr], rdrOffset}
|
|
||||||
return curOffset + offset, nil
|
|
||||||
default:
|
|
||||||
return -1, fmt.Errorf("Invalid whence: %d", whence)
|
|
||||||
}
|
|
||||||
|
|
||||||
return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) {
|
|
||||||
var rdr io.ReadSeeker
|
|
||||||
var rdrOffset int64
|
|
||||||
|
|
||||||
for i, rdr := range r.readers {
|
|
||||||
offsetTo, err := r.getOffsetToReader(rdr)
|
|
||||||
if err != nil {
|
|
||||||
return nil, -1, err
|
|
||||||
}
|
|
||||||
if offsetTo > offset {
|
|
||||||
rdr = r.readers[i-1]
|
|
||||||
rdrOffset = offsetTo - offset
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
if rdr == r.readers[len(r.readers)-1] {
|
|
||||||
rdrOffset = offsetTo + offset
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return rdr, rdrOffset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) getCurOffset() (int64, error) {
|
|
||||||
var totalSize int64
|
|
||||||
for _, rdr := range r.readers[:r.pos.idx+1] {
|
|
||||||
if r.posIdx[rdr] == r.pos.idx {
|
|
||||||
totalSize += r.pos.offset
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
size, err := getReadSeekerSize(rdr)
|
|
||||||
if err != nil {
|
|
||||||
return -1, fmt.Errorf("error getting seeker size: %v", err)
|
|
||||||
}
|
|
||||||
totalSize += size
|
|
||||||
}
|
|
||||||
return totalSize, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) {
|
|
||||||
var offset int64
|
|
||||||
for _, r := range r.readers {
|
|
||||||
if r == rdr {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
|
|
||||||
size, err := getReadSeekerSize(rdr)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
offset += size
|
|
||||||
}
|
|
||||||
return offset, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *multiReadSeeker) Read(b []byte) (int, error) {
|
|
||||||
if r.pos == nil {
|
|
||||||
r.pos = &pos{0, 0}
|
|
||||||
}
|
|
||||||
|
|
||||||
bCap := int64(cap(b))
|
|
||||||
buf := bytes.NewBuffer(nil)
|
|
||||||
var rdr io.ReadSeeker
|
|
||||||
|
|
||||||
for _, rdr = range r.readers[r.pos.idx:] {
|
|
||||||
readBytes, err := io.CopyN(buf, rdr, bCap)
|
|
||||||
if err != nil && err != io.EOF {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
bCap -= readBytes
|
|
||||||
|
|
||||||
if bCap == 0 {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
rdrPos, err := rdr.Seek(0, os.SEEK_CUR)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
r.pos = &pos{r.posIdx[rdr], rdrPos}
|
|
||||||
return buf.Read(b)
|
|
||||||
}
|
|
||||||
|
|
||||||
func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) {
|
|
||||||
// save the current position
|
|
||||||
pos, err := rdr.Seek(0, os.SEEK_CUR)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// get the size
|
|
||||||
size, err := rdr.Seek(0, os.SEEK_END)
|
|
||||||
if err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// reset the position
|
|
||||||
if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil {
|
|
||||||
return -1, err
|
|
||||||
}
|
|
||||||
return size, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided
|
|
||||||
// input readseekers. After calling this method the initial position is set to the
|
|
||||||
// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances
|
|
||||||
// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker.
|
|
||||||
// Seek can be used over the sum of lengths of all readseekers.
|
|
||||||
//
|
|
||||||
// When a MultiReadSeeker is used, no Read and Seek operations should be made on
|
|
||||||
// its ReadSeeker components. Also, users should make no assumption on the state
|
|
||||||
// of individual readseekers while the MultiReadSeeker is used.
|
|
||||||
func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker {
|
|
||||||
if len(readers) == 1 {
|
|
||||||
return readers[0]
|
|
||||||
}
|
|
||||||
idx := make(map[io.ReadSeeker]int)
|
|
||||||
for i, rdr := range readers {
|
|
||||||
idx[rdr] = i
|
|
||||||
}
|
|
||||||
return &multiReadSeeker{
|
|
||||||
readers: readers,
|
|
||||||
posIdx: idx,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,154 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"crypto/sha256"
|
|
||||||
"encoding/hex"
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/golang.org/x/net/context"
|
|
||||||
)
|
|
||||||
|
|
||||||
type readCloserWrapper struct {
|
|
||||||
io.Reader
|
|
||||||
closer func() error
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *readCloserWrapper) Close() error {
|
|
||||||
return r.closer()
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReadCloserWrapper returns a new io.ReadCloser.
|
|
||||||
func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser {
|
|
||||||
return &readCloserWrapper{
|
|
||||||
Reader: r,
|
|
||||||
closer: closer,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
type readerErrWrapper struct {
|
|
||||||
reader io.Reader
|
|
||||||
closer func()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *readerErrWrapper) Read(p []byte) (int, error) {
|
|
||||||
n, err := r.reader.Read(p)
|
|
||||||
if err != nil {
|
|
||||||
r.closer()
|
|
||||||
}
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewReaderErrWrapper returns a new io.Reader.
|
|
||||||
func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader {
|
|
||||||
return &readerErrWrapper{
|
|
||||||
reader: r,
|
|
||||||
closer: closer,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// HashData returns the sha256 sum of src.
|
|
||||||
func HashData(src io.Reader) (string, error) {
|
|
||||||
h := sha256.New()
|
|
||||||
if _, err := io.Copy(h, src); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnEOFReader wraps a io.ReadCloser and a function
|
|
||||||
// the function will run at the end of file or close the file.
|
|
||||||
type OnEOFReader struct {
|
|
||||||
Rc io.ReadCloser
|
|
||||||
Fn func()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OnEOFReader) Read(p []byte) (n int, err error) {
|
|
||||||
n, err = r.Rc.Read(p)
|
|
||||||
if err == io.EOF {
|
|
||||||
r.runFunc()
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the file and run the function.
|
|
||||||
func (r *OnEOFReader) Close() error {
|
|
||||||
err := r.Rc.Close()
|
|
||||||
r.runFunc()
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (r *OnEOFReader) runFunc() {
|
|
||||||
if fn := r.Fn; fn != nil {
|
|
||||||
fn()
|
|
||||||
r.Fn = nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
|
|
||||||
// operations.
|
|
||||||
type cancelReadCloser struct {
|
|
||||||
cancel func()
|
|
||||||
pR *io.PipeReader // Stream to read from
|
|
||||||
pW *io.PipeWriter
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
|
|
||||||
// context is cancelled. The returned io.ReadCloser must be closed when it is
|
|
||||||
// no longer needed.
|
|
||||||
func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
|
|
||||||
pR, pW := io.Pipe()
|
|
||||||
|
|
||||||
// Create a context used to signal when the pipe is closed
|
|
||||||
doneCtx, cancel := context.WithCancel(context.Background())
|
|
||||||
|
|
||||||
p := &cancelReadCloser{
|
|
||||||
cancel: cancel,
|
|
||||||
pR: pR,
|
|
||||||
pW: pW,
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
_, err := io.Copy(pW, in)
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
// If the context was closed, p.closeWithError
|
|
||||||
// was already called. Calling it again would
|
|
||||||
// change the error that Read returns.
|
|
||||||
default:
|
|
||||||
p.closeWithError(err)
|
|
||||||
}
|
|
||||||
in.Close()
|
|
||||||
}()
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ctx.Done():
|
|
||||||
p.closeWithError(ctx.Err())
|
|
||||||
case <-doneCtx.Done():
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
return p
|
|
||||||
}
|
|
||||||
|
|
||||||
// Read wraps the Read method of the pipe that provides data from the wrapped
|
|
||||||
// ReadCloser.
|
|
||||||
func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
|
|
||||||
return p.pR.Read(buf)
|
|
||||||
}
|
|
||||||
|
|
||||||
// closeWithError closes the wrapper and its underlying reader. It will
|
|
||||||
// cause future calls to Read to return err.
|
|
||||||
func (p *cancelReadCloser) closeWithError(err error) {
|
|
||||||
p.pW.CloseWithError(err)
|
|
||||||
p.cancel()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the wrapper its underlying reader. It will cause
|
|
||||||
// future calls to Read to return io.EOF.
|
|
||||||
func (p *cancelReadCloser) Close() error {
|
|
||||||
p.closeWithError(io.EOF)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -1,6 +0,0 @@
|
|||||||
// +build !gccgo
|
|
||||||
|
|
||||||
package ioutils
|
|
||||||
|
|
||||||
func callSchedulerIfNecessary() {
|
|
||||||
}
|
|
||||||
@@ -1,13 +0,0 @@
|
|||||||
// +build gccgo
|
|
||||||
|
|
||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"runtime"
|
|
||||||
)
|
|
||||||
|
|
||||||
func callSchedulerIfNecessary() {
|
|
||||||
//allow or force Go scheduler to switch context, without explicitly
|
|
||||||
//forcing this will make it hang when using gccgo implementation
|
|
||||||
runtime.Gosched()
|
|
||||||
}
|
|
||||||
@@ -1,10 +0,0 @@
|
|||||||
// +build !windows
|
|
||||||
|
|
||||||
package ioutils
|
|
||||||
|
|
||||||
import "io/ioutil"
|
|
||||||
|
|
||||||
// TempDir on Unix systems is equivalent to ioutil.TempDir.
|
|
||||||
func TempDir(dir, prefix string) (string, error) {
|
|
||||||
return ioutil.TempDir(dir, prefix)
|
|
||||||
}
|
|
||||||
@@ -1,18 +0,0 @@
|
|||||||
// +build windows
|
|
||||||
|
|
||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io/ioutil"
|
|
||||||
|
|
||||||
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath"
|
|
||||||
)
|
|
||||||
|
|
||||||
// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
|
|
||||||
func TempDir(dir, prefix string) (string, error) {
|
|
||||||
tempDir, err := ioutil.TempDir(dir, prefix)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
return longpath.AddPrefix(tempDir), nil
|
|
||||||
}
|
|
||||||
@@ -1,92 +0,0 @@
|
|||||||
package ioutils
|
|
||||||
|
|
||||||
import (
|
|
||||||
"errors"
|
|
||||||
"io"
|
|
||||||
"net/http"
|
|
||||||
"sync"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WriteFlusher wraps the Write and Flush operation ensuring that every write
|
|
||||||
// is a flush. In addition, the Close method can be called to intercept
|
|
||||||
// Read/Write calls if the targets lifecycle has already ended.
|
|
||||||
type WriteFlusher struct {
|
|
||||||
mu sync.Mutex
|
|
||||||
w io.Writer
|
|
||||||
flusher http.Flusher
|
|
||||||
flushed bool
|
|
||||||
closed error
|
|
||||||
|
|
||||||
// TODO(stevvooe): Use channel for closed instead, remove mutex. Using a
|
|
||||||
// channel will allow one to properly order the operations.
|
|
||||||
}
|
|
||||||
|
|
||||||
var errWriteFlusherClosed = errors.New("writeflusher: closed")
|
|
||||||
|
|
||||||
func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
|
|
||||||
wf.mu.Lock()
|
|
||||||
defer wf.mu.Unlock()
|
|
||||||
if wf.closed != nil {
|
|
||||||
return 0, wf.closed
|
|
||||||
}
|
|
||||||
|
|
||||||
n, err = wf.w.Write(b)
|
|
||||||
wf.flush() // every write is a flush.
|
|
||||||
return n, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flush the stream immediately.
|
|
||||||
func (wf *WriteFlusher) Flush() {
|
|
||||||
wf.mu.Lock()
|
|
||||||
defer wf.mu.Unlock()
|
|
||||||
|
|
||||||
wf.flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
// flush the stream immediately without taking a lock. Used internally.
|
|
||||||
func (wf *WriteFlusher) flush() {
|
|
||||||
if wf.closed != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
wf.flushed = true
|
|
||||||
wf.flusher.Flush()
|
|
||||||
}
|
|
||||||
|
|
||||||
// Flushed returns the state of flushed.
|
|
||||||
// If it's flushed, return true, or else it return false.
|
|
||||||
func (wf *WriteFlusher) Flushed() bool {
|
|
||||||
// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
|
|
||||||
// be used to detect whether or a response code has been issued or not.
|
|
||||||
// Another hook should be used instead.
|
|
||||||
wf.mu.Lock()
|
|
||||||
defer wf.mu.Unlock()
|
|
||||||
|
|
||||||
return wf.flushed
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the write flusher, disallowing any further writes to the
|
|
||||||
// target. After the flusher is closed, all calls to write or flush will
|
|
||||||
// result in an error.
|
|
||||||
func (wf *WriteFlusher) Close() error {
|
|
||||||
wf.mu.Lock()
|
|
||||||
defer wf.mu.Unlock()
|
|
||||||
|
|
||||||
if wf.closed != nil {
|
|
||||||
return wf.closed
|
|
||||||
}
|
|
||||||
|
|
||||||
wf.closed = errWriteFlusherClosed
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWriteFlusher returns a new WriteFlusher.
|
|
||||||
func NewWriteFlusher(w io.Writer) *WriteFlusher {
|
|
||||||
var flusher http.Flusher
|
|
||||||
if f, ok := w.(http.Flusher); ok {
|
|
||||||
flusher = f
|
|
||||||
} else {
|
|
||||||
flusher = &NopFlusher{}
|
|
||||||
}
|
|
||||||
return &WriteFlusher{w: w, flusher: flusher}
|
|
||||||
}
|
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user