Add dockertest.v3 to vendor

Jeff Mitchell
2017-02-26 16:53:19 -05:00
parent ee4c13c944
commit 7a31da8d94
325 changed files with 39074 additions and 0 deletions

191
vendor/github.com/docker/docker/LICENSE generated vendored Normal file

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2017 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

19
vendor/github.com/docker/docker/NOTICE generated vendored Normal file

@@ -0,0 +1,19 @@
Docker
Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
This product contains software (https://github.com/kr/pty) developed
by Keith Rarick, licensed under the MIT License.
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

22
vendor/github.com/docker/docker/api/types/auth.go generated vendored Normal file

@@ -0,0 +1,22 @@
package types
// AuthConfig contains authorization information for connecting to a Registry
type AuthConfig struct {
Username string `json:"username,omitempty"`
Password string `json:"password,omitempty"`
Auth string `json:"auth,omitempty"`
// Email is an optional value associated with the username.
// This field is deprecated and will be removed in a later
// version of docker.
Email string `json:"email,omitempty"`
ServerAddress string `json:"serveraddress,omitempty"`
// IdentityToken is used to authenticate the user and get
// an access token for the registry.
IdentityToken string `json:"identitytoken,omitempty"`
// RegistryToken is a bearer token to be sent to a registry
RegistryToken string `json:"registrytoken,omitempty"`
}
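
For context, a minimal sketch of how an AuthConfig is typically serialized into the X-Registry-Auth header value expected by the Engine API; encodeAuthToBase64 is a hypothetical helper and the credential values are illustrative, not part of this commit:

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

// encodeAuthToBase64 marshals an AuthConfig to JSON and base64-URL-encodes
// it, which is the format the Engine API expects in X-Registry-Auth.
func encodeAuthToBase64(auth types.AuthConfig) (string, error) {
	buf, err := json.Marshal(auth)
	if err != nil {
		return "", err
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}

func main() {
	header, err := encodeAuthToBase64(types.AuthConfig{
		Username:      "user",   // illustrative
		Password:      "secret", // illustrative
		ServerAddress: "https://index.docker.io/v1/",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(header) // value to send as the X-Registry-Auth header
}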


@@ -0,0 +1,23 @@
package blkiodev
import "fmt"
// WeightDevice is a structure that holds device:weight pair
type WeightDevice struct {
Path string
Weight uint16
}
func (w *WeightDevice) String() string {
return fmt.Sprintf("%s:%d", w.Path, w.Weight)
}
// ThrottleDevice is a structure that holds device:rate_per_second pair
type ThrottleDevice struct {
Path string
Rate uint64
}
func (t *ThrottleDevice) String() string {
return fmt.Sprintf("%s:%d", t.Path, t.Rate)
}
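
A minimal sketch showing what the two String methods render; the device path and values are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/blkiodev"
)

func main() {
	w := &blkiodev.WeightDevice{Path: "/dev/sda", Weight: 500}
	t := &blkiodev.ThrottleDevice{Path: "/dev/sda", Rate: 1048576}
	fmt.Println(w.String()) // /dev/sda:500
	fmt.Println(t.String()) // /dev/sda:1048576
}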

379
vendor/github.com/docker/docker/api/types/client.go generated vendored Normal file

@@ -0,0 +1,379 @@
package types
import (
"bufio"
"io"
"net"
"os"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-units"
)
// CheckpointCreateOptions holds parameters to create a checkpoint from a container
type CheckpointCreateOptions struct {
CheckpointID string
CheckpointDir string
Exit bool
}
// CheckpointListOptions holds parameters to list checkpoints for a container
type CheckpointListOptions struct {
CheckpointDir string
}
// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container
type CheckpointDeleteOptions struct {
CheckpointID string
CheckpointDir string
}
// ContainerAttachOptions holds parameters to attach to a container.
type ContainerAttachOptions struct {
Stream bool
Stdin bool
Stdout bool
Stderr bool
DetachKeys string
Logs bool
}
// ContainerCommitOptions holds parameters to commit changes into a container.
type ContainerCommitOptions struct {
Reference string
Comment string
Author string
Changes []string
Pause bool
Config *container.Config
}
// ContainerExecInspect holds information returned by exec inspect.
type ContainerExecInspect struct {
ExecID string
ContainerID string
Running bool
ExitCode int
Pid int
}
// ContainerListOptions holds parameters to list containers with.
type ContainerListOptions struct {
Quiet bool
Size bool
All bool
Latest bool
Since string
Before string
Limit int
Filters filters.Args
}
// ContainerLogsOptions holds parameters to filter logs with.
type ContainerLogsOptions struct {
ShowStdout bool
ShowStderr bool
Since string
Timestamps bool
Follow bool
Tail string
Details bool
}
// ContainerRemoveOptions holds parameters to remove containers.
type ContainerRemoveOptions struct {
RemoveVolumes bool
RemoveLinks bool
Force bool
}
// ContainerStartOptions holds parameters to start containers.
type ContainerStartOptions struct {
CheckpointID string
CheckpointDir string
}
// CopyToContainerOptions holds information
// about files to copy into a container
type CopyToContainerOptions struct {
AllowOverwriteDirWithFile bool
}
// EventsOptions holds parameters to filter events with.
type EventsOptions struct {
Since string
Until string
Filters filters.Args
}
// NetworkListOptions holds parameters to filter the list of networks with.
type NetworkListOptions struct {
Filters filters.Args
}
// HijackedResponse holds connection information for a hijacked request.
type HijackedResponse struct {
Conn net.Conn
Reader *bufio.Reader
}
// Close closes the hijacked connection and reader.
func (h *HijackedResponse) Close() {
h.Conn.Close()
}
// CloseWriter is an interface implemented by connections
// that can close their write side to stop further writes.
type CloseWriter interface {
CloseWrite() error
}
// CloseWrite closes a readWriter for writing.
func (h *HijackedResponse) CloseWrite() error {
if conn, ok := h.Conn.(CloseWriter); ok {
return conn.CloseWrite()
}
return nil
}
// ImageBuildOptions holds the information
// necessary to build images.
type ImageBuildOptions struct {
Tags []string
SuppressOutput bool
RemoteContext string
NoCache bool
Remove bool
ForceRemove bool
PullParent bool
Isolation container.Isolation
CPUSetCPUs string
CPUSetMems string
CPUShares int64
CPUQuota int64
CPUPeriod int64
Memory int64
MemorySwap int64
CgroupParent string
NetworkMode string
ShmSize int64
Dockerfile string
Ulimits []*units.Ulimit
// BuildArgs needs to be a *string instead of just a string so that
// we can tell the difference between "" (empty string) and no value
// at all (nil). See the parsing of buildArgs in
// api/server/router/build/build_routes.go for even more info.
BuildArgs map[string]*string
AuthConfigs map[string]AuthConfig
Context io.Reader
Labels map[string]string
// squash the resulting image's layers to the parent
// preserves the original image and creates a new one from the parent with all
// the changes applied to a single layer
Squash bool
// CacheFrom specifies images that are used for matching cache. Images
// specified here do not need to have a valid parent chain to match cache.
CacheFrom []string
SecurityOpt []string
}
// ImageBuildResponse holds information
// returned by a server after building
// an image.
type ImageBuildResponse struct {
Body io.ReadCloser
OSType string
}
// ImageCreateOptions holds information to create images.
type ImageCreateOptions struct {
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
}
// ImageImportSource holds source information for ImageImport
type ImageImportSource struct {
Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this.
SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute.
}
// ImageImportOptions holds information to import images from the client host.
type ImageImportOptions struct {
Tag string // Tag is the name to tag this image with. This attribute is deprecated.
Message string // Message is the message to tag the image with
Changes []string // Changes are the raw changes to apply to this image
}
// ImageListOptions holds parameters to filter the list of images with.
type ImageListOptions struct {
All bool
Filters filters.Args
}
// ImageLoadResponse returns information to the client about a load process.
type ImageLoadResponse struct {
// Body must be closed to avoid a resource leak
Body io.ReadCloser
JSON bool
}
// ImagePullOptions holds information to pull images.
type ImagePullOptions struct {
All bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
PrivilegeFunc RequestPrivilegeFunc
}
// RequestPrivilegeFunc is a function interface that
// clients can supply to retry operations after
// getting an authorization error.
// This function returns the registry authentication
// header value in base 64 format, or an error
// if the privilege request fails.
type RequestPrivilegeFunc func() (string, error)
// ImagePushOptions holds information to push images.
type ImagePushOptions ImagePullOptions
// ImageRemoveOptions holds parameters to remove images.
type ImageRemoveOptions struct {
Force bool
PruneChildren bool
}
// ImageSearchOptions holds parameters to search images with.
type ImageSearchOptions struct {
RegistryAuth string
PrivilegeFunc RequestPrivilegeFunc
Filters filters.Args
Limit int
}
// ResizeOptions holds parameters to resize a tty.
// It can be used to resize container ttys and
// exec process ttys too.
type ResizeOptions struct {
Height uint
Width uint
}
// VersionResponse holds version information for the client and the server
type VersionResponse struct {
Client *Version
Server *Version
}
// ServerOK returns true when the client could connect to the docker server
// and parse the information received. It returns false otherwise.
func (v VersionResponse) ServerOK() bool {
return v.Server != nil
}
// NodeListOptions holds parameters to list nodes with.
type NodeListOptions struct {
Filters filters.Args
}
// NodeRemoveOptions holds parameters to remove nodes with.
type NodeRemoveOptions struct {
Force bool
}
// ServiceCreateOptions contains the options to use when creating a service.
type ServiceCreateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when creating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
}
// ServiceCreateResponse contains the information returned to a client
// on the creation of a new service.
type ServiceCreateResponse struct {
// ID is the ID of the created service.
ID string
// Warnings is a set of non-fatal warning messages to pass on to the user.
Warnings []string `json:",omitempty"`
}
// Values for RegistryAuthFrom in ServiceUpdateOptions
const (
RegistryAuthFromSpec = "spec"
RegistryAuthFromPreviousSpec = "previous-spec"
)
// ServiceUpdateOptions contains the options to be used for updating services.
type ServiceUpdateOptions struct {
// EncodedRegistryAuth is the encoded registry authorization credentials to
// use when updating the service.
//
// This field follows the format of the X-Registry-Auth header.
EncodedRegistryAuth string
// TODO(stevvooe): Consider moving the version parameter of ServiceUpdate
// into this field. While it does open API users up to racy writes, most
// users may not need that level of consistency in practice.
// RegistryAuthFrom specifies where to find the registry authorization
// credentials if they are not given in EncodedRegistryAuth. Valid
// values are "spec" and "previous-spec".
RegistryAuthFrom string
}
// ServiceListOptions holds parameters to list services with.
type ServiceListOptions struct {
Filters filters.Args
}
// TaskListOptions holds parameters to list tasks with.
type TaskListOptions struct {
Filters filters.Args
}
// PluginRemoveOptions holds parameters to remove plugins.
type PluginRemoveOptions struct {
Force bool
}
// PluginEnableOptions holds parameters to enable plugins.
type PluginEnableOptions struct {
Timeout int
}
// PluginDisableOptions holds parameters to disable plugins.
type PluginDisableOptions struct {
Force bool
}
// PluginInstallOptions holds parameters to install a plugin.
type PluginInstallOptions struct {
Disabled bool
AcceptAllPermissions bool
RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry
RemoteRef string // RemoteRef is the plugin name on the registry
PrivilegeFunc RequestPrivilegeFunc
AcceptPermissionsFunc func(PluginPrivileges) (bool, error)
Args []string
}
// SecretRequestOption is a type for requesting secrets
type SecretRequestOption struct {
Source string
Target string
UID string
GID string
Mode os.FileMode
}
// SwarmUnlockKeyResponse contains the response for Engine API:
// GET /swarm/unlockkey
type SwarmUnlockKeyResponse struct {
// UnlockKey is the unlock key in ASCII-armored format.
UnlockKey string
}
// PluginCreateOptions hold all options to plugin create.
type PluginCreateOptions struct {
RepoName string
}
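
A minimal sketch of how these option structs are typically populated before being handed to an Engine API client; the actual client call is omitted and all field values are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/api/types/filters"
)

func main() {
	// List at most ten running containers carrying a given label.
	f := filters.NewArgs()
	f.Add("label", "env=dev")
	f.Add("status", "running")
	listOpts := types.ContainerListOptions{
		Limit:   10,
		Filters: f,
	}

	// Follow both output streams with timestamps, starting from the
	// last 100 lines.
	logOpts := types.ContainerLogsOptions{
		ShowStdout: true,
		ShowStderr: true,
		Follow:     true,
		Timestamps: true,
		Tail:       "100",
	}

	fmt.Printf("%+v\n%+v\n", listOpts, logOpts)
}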

69
vendor/github.com/docker/docker/api/types/configs.go generated vendored Normal file

@@ -0,0 +1,69 @@
package types
import (
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
)
// configs holds structs used for internal communication between the
// frontend (such as an http server) and the backend (such as the
// docker daemon).
// ContainerCreateConfig is the parameter set to ContainerCreate()
type ContainerCreateConfig struct {
Name string
Config *container.Config
HostConfig *container.HostConfig
NetworkingConfig *network.NetworkingConfig
AdjustCPUShares bool
}
// ContainerRmConfig holds arguments for the container remove
// operation. This struct is used to tell the backend what operations
// to perform.
type ContainerRmConfig struct {
ForceRemove, RemoveVolume, RemoveLink bool
}
// ContainerCommitConfig contains build configs for commit operation,
// and is used when making a commit with the current state of the container.
type ContainerCommitConfig struct {
Pause bool
Repo string
Tag string
Author string
Comment string
// merge container config into commit config before commit
MergeConfigs bool
Config *container.Config
}
// ExecConfig is a small subset of the Config struct that holds the configuration
// for the exec feature of docker.
type ExecConfig struct {
User string // User that will run the command
Privileged bool // Is the container in privileged mode
Tty bool // Attach standard streams to a tty.
AttachStdin bool // Attach the standard input, allowing user interaction
AttachStderr bool // Attach the standard error
AttachStdout bool // Attach the standard output
Detach bool // Execute in detach mode
DetachKeys string // Escape keys for detach
Env []string // Environment variables
Cmd []string // Execution commands and args
}
// PluginRmConfig holds arguments for plugin remove.
type PluginRmConfig struct {
ForceRemove bool
}
// PluginEnableConfig holds arguments for plugin enable
type PluginEnableConfig struct {
Timeout int
}
// PluginDisableConfig holds arguments for plugin disable.
type PluginDisableConfig struct {
ForceDisable bool
}
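
A minimal sketch of an ExecConfig for a non-interactive command, as it might be posted to the exec-create endpoint; the command and environment are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	exec := types.ExecConfig{
		User:         "root",
		AttachStdout: true,
		AttachStderr: true,
		Env:          []string{"DEBUG=1"},
		Cmd:          []string{"ls", "-l", "/var"},
	}
	fmt.Printf("%+v\n", exec)
}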


@@ -0,0 +1,62 @@
package container
import (
"time"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
)
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
// Config contains the configuration data about a container.
// It should hold only portable information about the container.
// Here, "portable" means "independent from the host we are running on".
// Non-portable information *should* appear in HostConfig.
// All fields added to this struct must be marked `omitempty` to keep getting
// predictable hashes from the old `v1Compatibility` configuration.
type Config struct {
Hostname string // Hostname
Domainname string // Domainname
User string // User that will run the command(s) inside the container, also support user:group
AttachStdin bool // Attach the standard input, allowing user interaction
AttachStdout bool // Attach the standard output
AttachStderr bool // Attach the standard error
ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports
Tty bool // Attach standard streams to a tty, including stdin if it is not closed.
OpenStdin bool // Open stdin
StdinOnce bool // If true, close stdin after the first attached client disconnects.
Env []string // List of environment variables to set in the container
Cmd strslice.StrSlice // Command to run when starting the container
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
Image string // Name of the image as it was passed by the operator (e.g. could be symbolic)
Volumes map[string]struct{} // List of volumes (mounts) used for the container
WorkingDir string // Current directory (PWD) in which the command will be launched
Entrypoint strslice.StrSlice // Entrypoint to run when starting the container
NetworkDisabled bool `json:",omitempty"` // Is network disabled
MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that was defined in the image's Dockerfile
Labels map[string]string // List of labels set to this container
StopSignal string `json:",omitempty"` // Signal to stop a container
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
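
A minimal sketch of a portable container Config with a CMD-SHELL health check; the image, probe command, and timings are illustrative:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/container"
)

func main() {
	cfg := container.Config{
		Image: "nginx:1.11",
		Env:   []string{"FOO=bar"},
		Healthcheck: &container.HealthConfig{
			// Run the probe through the system's default shell.
			Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
			Interval: 30 * time.Second,
			Timeout:  5 * time.Second,
			Retries:  3,
		},
		Labels: map[string]string{"env": "dev"},
	}
	fmt.Printf("%+v\n", cfg)
}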


@@ -0,0 +1,21 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerChangeResponseItem container change response item
// swagger:model ContainerChangeResponseItem
type ContainerChangeResponseItem struct {
// Kind of change
// Required: true
Kind uint8 `json:"Kind"`
// Path to file that has changed
// Required: true
Path string `json:"Path"`
}


@@ -0,0 +1,21 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerCreateCreatedBody container create created body
// swagger:model ContainerCreateCreatedBody
type ContainerCreateCreatedBody struct {
// The ID of the created container
// Required: true
ID string `json:"Id"`
// Warnings encountered when creating the container
// Required: true
Warnings []string `json:"Warnings"`
}


@@ -0,0 +1,21 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerTopOKBody container top o k body
// swagger:model ContainerTopOKBody
type ContainerTopOKBody struct {
// Each process running in the container, where each process is an array of values corresponding to the titles
// Required: true
Processes [][]string `json:"Processes"`
// The ps column titles
// Required: true
Titles []string `json:"Titles"`
}


@@ -0,0 +1,17 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerUpdateOKBody container update o k body
// swagger:model ContainerUpdateOKBody
type ContainerUpdateOKBody struct {
// warnings
// Required: true
Warnings []string `json:"Warnings"`
}


@@ -0,0 +1,17 @@
package container
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// ContainerWaitOKBody container wait o k body
// swagger:model ContainerWaitOKBody
type ContainerWaitOKBody struct {
// Exit code of the container
// Required: true
StatusCode int64 `json:"StatusCode"`
}


@@ -0,0 +1,345 @@
package container
import (
"strings"
"github.com/docker/docker/api/types/blkiodev"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/strslice"
"github.com/docker/go-connections/nat"
"github.com/docker/go-units"
)
// NetworkMode represents the container network stack.
type NetworkMode string
// Isolation represents the isolation technology of a container. The supported
// values are platform specific
type Isolation string
// IsDefault indicates the default isolation technology of a container. On Linux this
// is the native driver. On Windows, this is a Windows Server Container.
func (i Isolation) IsDefault() bool {
return strings.ToLower(string(i)) == "default" || string(i) == ""
}
// IpcMode represents the container ipc stack.
type IpcMode string
// IsPrivate indicates whether the container uses its private ipc stack.
func (n IpcMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsHost indicates whether the container uses the host's ipc stack.
func (n IpcMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether the container uses a container's ipc stack.
func (n IpcMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the ipc stack is valid.
func (n IpcMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
case "container":
if len(parts) != 2 || parts[1] == "" {
return false
}
default:
return false
}
return true
}
// Container returns the name of the container whose ipc stack is going to be used.
func (n IpcMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// UsernsMode represents userns mode in the container.
type UsernsMode string
// IsHost indicates whether the container uses the host's userns.
func (n UsernsMode) IsHost() bool {
return n == "host"
}
// IsPrivate indicates whether the container uses a private userns.
func (n UsernsMode) IsPrivate() bool {
return !(n.IsHost())
}
// Valid indicates whether the userns is valid.
func (n UsernsMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}
// CgroupSpec represents the cgroup to use for the container.
type CgroupSpec string
// IsContainer indicates whether the container is using another container's cgroup
func (c CgroupSpec) IsContainer() bool {
parts := strings.SplitN(string(c), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the cgroup spec is valid.
func (c CgroupSpec) Valid() bool {
return c.IsContainer() || c == ""
}
// Container returns the name of the container whose cgroup will be used.
func (c CgroupSpec) Container() string {
parts := strings.SplitN(string(c), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// UTSMode represents the UTS namespace of the container.
type UTSMode string
// IsPrivate indicates whether the container uses its private UTS namespace.
func (n UTSMode) IsPrivate() bool {
return !(n.IsHost())
}
// IsHost indicates whether the container uses the host's UTS namespace.
func (n UTSMode) IsHost() bool {
return n == "host"
}
// Valid indicates whether the UTS namespace is valid.
func (n UTSMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
default:
return false
}
return true
}
// PidMode represents the pid namespace of the container.
type PidMode string
// IsPrivate indicates whether the container uses its own new pid namespace.
func (n PidMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsHost indicates whether the container uses the host's pid namespace.
func (n PidMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether the container uses a container's pid namespace.
func (n PidMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// Valid indicates whether the pid namespace is valid.
func (n PidMode) Valid() bool {
parts := strings.Split(string(n), ":")
switch mode := parts[0]; mode {
case "", "host":
case "container":
if len(parts) != 2 || parts[1] == "" {
return false
}
default:
return false
}
return true
}
// Container returns the name of the container whose pid namespace is going to be used.
func (n PidMode) Container() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// DeviceMapping represents the device mapping between the host and the container.
type DeviceMapping struct {
PathOnHost string
PathInContainer string
CgroupPermissions string
}
// RestartPolicy represents the restart policies of the container.
type RestartPolicy struct {
Name string
MaximumRetryCount int
}
// IsNone indicates whether the container has the "no" restart policy.
// This means the container will not automatically restart when exiting.
func (rp *RestartPolicy) IsNone() bool {
return rp.Name == "no" || rp.Name == ""
}
// IsAlways indicates whether the container has the "always" restart policy.
// This means the container will automatically restart regardless of the exit status.
func (rp *RestartPolicy) IsAlways() bool {
return rp.Name == "always"
}
// IsOnFailure indicates whether the container has the "on-failure" restart policy.
// This means the container will automatically restart if it exits with a non-zero exit status.
func (rp *RestartPolicy) IsOnFailure() bool {
return rp.Name == "on-failure"
}
// IsUnlessStopped indicates whether the container has the
// "unless-stopped" restart policy. This means the container will
// automatically restart unless the user has put it into a stopped state.
func (rp *RestartPolicy) IsUnlessStopped() bool {
return rp.Name == "unless-stopped"
}
// IsSame compares two RestartPolicy values to see if they are the same
func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool {
return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount
}
// LogMode is a type to define the available modes for logging
// These modes affect how logs are handled when log messages start piling up.
type LogMode string
// Available logging modes
const (
LogModeUnset = ""
LogModeBlocking LogMode = "blocking"
LogModeNonBlock LogMode = "non-blocking"
)
// LogConfig represents the logging configuration of the container.
type LogConfig struct {
Type string
Config map[string]string
}
// Resources contains container's resources (cgroups config, ulimits...)
type Resources struct {
// Applicable to all platforms
CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers)
Memory int64 // Memory limit (in bytes)
NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 1e-9 CPUs.
// Applicable to UNIX platforms
CgroupParent string // Parent cgroup.
BlkioWeight uint16 // Block IO weight (relative weight vs. other containers)
BlkioWeightDevice []*blkiodev.WeightDevice
BlkioDeviceReadBps []*blkiodev.ThrottleDevice
BlkioDeviceWriteBps []*blkiodev.ThrottleDevice
BlkioDeviceReadIOps []*blkiodev.ThrottleDevice
BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice
CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period
CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota
CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period
CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime
CpusetCpus string // CPUs in which to allow execution (e.g. "0-2", "0,1")
CpusetMems string // Memory nodes (MEMs) in which to allow execution (e.g. "0-2", "0,1")
Devices []DeviceMapping // List of devices to map inside the container
DeviceCgroupRules []string // List of rules to be added to the device cgroup
DiskQuota int64 // Disk limit (in bytes)
KernelMemory int64 // Kernel memory limit (in bytes)
MemoryReservation int64 // Memory soft limit (in bytes)
MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap
MemorySwappiness *int64 // Tuning container memory swappiness behaviour
OomKillDisable *bool // Whether to disable OOM Killer or not
PidsLimit int64 // Setting pids limit for a container
Ulimits []*units.Ulimit // List of ulimits to be set in the container
// Applicable to Windows
CPUCount int64 `json:"CpuCount"` // CPU count
CPUPercent int64 `json:"CpuPercent"` // CPU percent
IOMaximumIOps uint64 // Maximum IOps for the container system drive
IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive
}
// UpdateConfig holds the mutable attributes of a Container.
// Those attributes can be updated at runtime.
type UpdateConfig struct {
// Contains container's resources (cgroups, ulimits)
Resources
RestartPolicy RestartPolicy
}
// HostConfig is the non-portable Config structure of a container.
// Here, "non-portable" means "dependent on the host we are running on".
// Portable information *should* appear in Config.
type HostConfig struct {
// Applicable to all platforms
Binds []string // List of volume bindings for this container
ContainerIDFile string // File (path) where the containerId is written
LogConfig LogConfig // Configuration of the logs for this container
NetworkMode NetworkMode // Network mode to use for the container
PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host
RestartPolicy RestartPolicy // Restart policy to be used for the container
AutoRemove bool // Automatically remove container when it exits
VolumeDriver string // Name of the volume driver used to mount volumes
VolumesFrom []string // List of volumes to take from other container
// Applicable to UNIX platforms
CapAdd strslice.StrSlice // List of kernel capabilities to add to the container
CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container
DNS []string `json:"Dns"` // List of DNS servers to use
DNSOptions []string `json:"DnsOptions"` // List of DNS resolver options to use
DNSSearch []string `json:"DnsSearch"` // List of DNS search domains to use
ExtraHosts []string // List of extra hosts
GroupAdd []string // List of additional groups that the container process will run as
IpcMode IpcMode // IPC namespace to use for the container
Cgroup CgroupSpec // Cgroup to use for the container
Links []string // List of links (in the name:alias form)
OomScoreAdj int // Container preference for OOM-killing
PidMode PidMode // PID namespace to use for the container
Privileged bool // Is the container in privileged mode
PublishAllPorts bool // Should docker publish all exposed ports for the container
ReadonlyRootfs bool // Is the container root filesystem mounted read-only
SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux.
StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container.
Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container
UTSMode UTSMode // UTS namespace to use for the container
UsernsMode UsernsMode // The user namespace to use for the container
ShmSize int64 // Total shm memory usage
Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container
Runtime string `json:",omitempty"` // Runtime to use with this container
// Applicable to Windows
ConsoleSize [2]uint // Initial console size (height,width)
Isolation Isolation // Isolation technology of the container (e.g. default, hyperv)
// Contains container's resources (cgroups, ulimits)
Resources
// Mounts specs used by the container
Mounts []mount.Mount `json:",omitempty"`
// Run a custom init inside the container, if null, use the daemon's configured settings
Init *bool `json:",omitempty"`
// Custom init path
InitPath string `json:",omitempty"`
}
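
A minimal sketch of a HostConfig combining a restart policy, a port binding, and the embedded Resources limits; all values are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/go-connections/nat"
)

func main() {
	hc := container.HostConfig{
		NetworkMode: "bridge",
		RestartPolicy: container.RestartPolicy{
			Name:              "on-failure",
			MaximumRetryCount: 3,
		},
		// Map container port 80/tcp to host port 8080 on all interfaces.
		PortBindings: nat.PortMap{
			"80/tcp": []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: "8080"}},
		},
		Resources: container.Resources{
			Memory:    256 * 1024 * 1024, // 256 MiB
			CPUShares: 512,
		},
	}
	fmt.Printf("%+v\n", hc)
}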


@@ -0,0 +1,81 @@
// +build !windows

package container
import "strings"
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault()
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsBridge() {
return "bridge"
} else if n.IsHost() {
return "host"
} else if n.IsContainer() {
return "container"
} else if n.IsNone() {
return "none"
} else if n.IsDefault() {
return "default"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
// IsBridge indicates whether container uses the bridge network stack
func (n NetworkMode) IsBridge() bool {
return n == "bridge"
}
// IsHost indicates whether container uses the host network stack.
func (n NetworkMode) IsHost() bool {
return n == "host"
}
// IsContainer indicates whether container uses a container network stack.
func (n NetworkMode) IsContainer() bool {
parts := strings.SplitN(string(n), ":", 2)
return len(parts) > 1 && parts[0] == "container"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// ConnectedContainer is the id of the container whose network this container is connected to.
func (n NetworkMode) ConnectedContainer() string {
parts := strings.SplitN(string(n), ":", 2)
if len(parts) > 1 {
return parts[1]
}
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer()
}
// UserDefined returns the name of the user-created network, or an empty string.
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}
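
A minimal sketch of how the Unix NetworkMode helpers classify a mode string (this only builds on non-Windows platforms; the container ID is illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	modes := []container.NetworkMode{"bridge", "host", "container:abc123", "mynet"}
	for _, m := range modes {
		fmt.Printf("%-18s name=%-8s private=%-5v connected=%q\n",
			string(m), m.NetworkName(), m.IsPrivate(), m.ConnectedContainer())
	}
}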


@@ -0,0 +1,87 @@
package container
import (
"strings"
)
// IsDefault indicates whether container uses the default network stack.
func (n NetworkMode) IsDefault() bool {
return n == "default"
}
// IsNone indicates whether container isn't using a network stack.
func (n NetworkMode) IsNone() bool {
return n == "none"
}
// IsContainer indicates whether container uses a container network stack.
// Returns false as Windows doesn't support this mode
func (n NetworkMode) IsContainer() bool {
return false
}
// IsBridge indicates whether container uses the bridge network stack
// on Windows it is given the name NAT
func (n NetworkMode) IsBridge() bool {
return n == "nat"
}
// IsHost indicates whether container uses the host network stack.
// returns false as this is not supported by Windows
func (n NetworkMode) IsHost() bool {
return false
}
// IsPrivate indicates whether container uses its private network stack.
func (n NetworkMode) IsPrivate() bool {
return !(n.IsHost() || n.IsContainer())
}
// ConnectedContainer is the id of the container which network this container is connected to.
// Returns a blank string on Windows
func (n NetworkMode) ConnectedContainer() string {
return ""
}
// IsUserDefined indicates user-created network
func (n NetworkMode) IsUserDefined() bool {
return !n.IsDefault() && !n.IsNone() && !n.IsBridge()
}
// IsHyperV indicates the use of a Hyper-V partition for isolation
func (i Isolation) IsHyperV() bool {
return strings.ToLower(string(i)) == "hyperv"
}
// IsProcess indicates the use of process isolation
func (i Isolation) IsProcess() bool {
return strings.ToLower(string(i)) == "process"
}
// IsValid indicates if an isolation technology is valid
func (i Isolation) IsValid() bool {
return i.IsDefault() || i.IsHyperV() || i.IsProcess()
}
// NetworkName returns the name of the network stack.
func (n NetworkMode) NetworkName() string {
if n.IsDefault() {
return "default"
} else if n.IsBridge() {
return "nat"
} else if n.IsNone() {
return "none"
} else if n.IsUserDefined() {
return n.UserDefined()
}
return ""
}
// UserDefined returns the name of the user-created network, or an empty string.
func (n NetworkMode) UserDefined() string {
if n.IsUserDefined() {
return string(n)
}
return ""
}
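
A minimal sketch of the Windows-side Isolation helpers (this only builds on Windows; the values probed are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	for _, i := range []container.Isolation{"", "default", "process", "hyperv"} {
		fmt.Printf("%q valid=%v hyperv=%v process=%v\n",
			string(i), i.IsValid(), i.IsHyperV(), i.IsProcess())
	}
}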


@@ -0,0 +1,13 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ErrorResponse Represents an error.
// swagger:model ErrorResponse
type ErrorResponse struct {
// The error message.
// Required: true
Message string `json:"message"`
}


@@ -0,0 +1,310 @@
// Package filters provides helper functions to parse and handle command line
// filters, used for example in the docker ps or docker images commands.
package filters
import (
"encoding/json"
"errors"
"fmt"
"regexp"
"strings"
"github.com/docker/docker/api/types/versions"
)
// Args stores filter arguments as map key:{map key: bool}.
// It contains an aggregation of the map of arguments (which are in the form
// of -f 'key=value') based on the key, and stores values for the same key
// in a map with string keys and boolean values.
// e.g. given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu'
// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}}
type Args struct {
fields map[string]map[string]bool
}
// NewArgs initializes a new Args struct.
func NewArgs() Args {
return Args{fields: map[string]map[string]bool{}}
}
// ParseFlag parses the argument to the filter flag. Like
//
// `docker ps -f 'created=today' -f 'image.name=ubuntu*'`
//
// If prev map is provided, then it is appended to, and returned. By default a new
// map is created.
func ParseFlag(arg string, prev Args) (Args, error) {
filters := prev
if len(arg) == 0 {
return filters, nil
}
if !strings.Contains(arg, "=") {
return filters, ErrBadFormat
}
f := strings.SplitN(arg, "=", 2)
name := strings.ToLower(strings.TrimSpace(f[0]))
value := strings.TrimSpace(f[1])
filters.Add(name, value)
return filters, nil
}
// ErrBadFormat is an error returned in case of bad format for a filter.
var ErrBadFormat = errors.New("bad format of filter (expected name=value)")
// ToParam packs the Args into a string for easy transport from client to server.
func ToParam(a Args) (string, error) {
// this way we don't URL encode {}, just empty space
if a.Len() == 0 {
return "", nil
}
buf, err := json.Marshal(a.fields)
if err != nil {
return "", err
}
return string(buf), nil
}
// ToParamWithVersion packs the Args into a string for easy transport from client to server.
// The generated string will depend on the specified version (corresponding to the API version).
func ToParamWithVersion(version string, a Args) (string, error) {
// this way we don't URL encode {}, just empty space
if a.Len() == 0 {
return "", nil
}
// for daemons older than v1.10, filter must be of the form map[string][]string
var buf []byte
var err error
if version != "" && versions.LessThan(version, "1.22") {
buf, err = json.Marshal(convertArgsToSlice(a.fields))
} else {
buf, err = json.Marshal(a.fields)
}
if err != nil {
return "", err
}
return string(buf), nil
}
// FromParam unpacks the filter Args.
func FromParam(p string) (Args, error) {
if len(p) == 0 {
return NewArgs(), nil
}
r := strings.NewReader(p)
d := json.NewDecoder(r)
m := map[string]map[string]bool{}
if err := d.Decode(&m); err != nil {
r.Seek(0, 0)
// Allow parsing old arguments in slice format.
// Because other libraries might be sending them in this format.
deprecated := map[string][]string{}
if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil {
m = deprecatedArgs(deprecated)
} else {
return NewArgs(), err
}
}
return Args{m}, nil
}
// Get returns the list of values associated with a field.
// It returns a slice of strings to keep backwards compatibility with old code.
func (filters Args) Get(field string) []string {
values := filters.fields[field]
if values == nil {
return make([]string, 0)
}
slice := make([]string, 0, len(values))
for key := range values {
slice = append(slice, key)
}
return slice
}
// Add adds a new value to a filter field.
func (filters Args) Add(name, value string) {
if _, ok := filters.fields[name]; ok {
filters.fields[name][value] = true
} else {
filters.fields[name] = map[string]bool{value: true}
}
}
// Del removes a value from a filter field.
func (filters Args) Del(name, value string) {
if _, ok := filters.fields[name]; ok {
delete(filters.fields[name], value)
if len(filters.fields[name]) == 0 {
delete(filters.fields, name)
}
}
}
// Len returns the number of fields in the arguments.
func (filters Args) Len() int {
return len(filters.fields)
}
// MatchKVList returns true if the values for the specified field match the ones
// from the sources.
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
// field is 'label' and sources are {'label1': '1', 'label2': '2'}
// it returns true.
func (filters Args) MatchKVList(field string, sources map[string]string) bool {
fieldValues := filters.fields[field]
// do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
if len(sources) == 0 {
return false
}
for name2match := range fieldValues {
testKV := strings.SplitN(name2match, "=", 2)
v, ok := sources[testKV[0]]
if !ok {
return false
}
if len(testKV) == 2 && testKV[1] != v {
return false
}
}
return true
}
// Match returns true if the values for the specified field match the source string
// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name', {'ubuntu'}},
// field is 'image.name' and source is 'ubuntu'
// it returns true.
func (filters Args) Match(field, source string) bool {
if filters.ExactMatch(field, source) {
return true
}
fieldValues := filters.fields[field]
for name2match := range fieldValues {
match, err := regexp.MatchString(name2match, source)
if err != nil {
continue
}
if match {
return true
}
}
return false
}
// ExactMatch returns true if the source matches exactly one of the filters.
func (filters Args) ExactMatch(field, source string) bool {
fieldValues, ok := filters.fields[field]
// do not filter if there is no filter set or cannot determine filter
if !ok || len(fieldValues) == 0 {
return true
}
// try to match full name value to avoid O(N) regular expression matching
return fieldValues[source]
}
// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
func (filters Args) UniqueExactMatch(field, source string) bool {
fieldValues := filters.fields[field]
// do not filter if there is no filter set or cannot determine filter
if len(fieldValues) == 0 {
return true
}
if len(filters.fields[field]) != 1 {
return false
}
// try to match full name value to avoid O(N) regular expression matching
return fieldValues[source]
}
// FuzzyMatch returns true if the source matches exactly one of the filters,
// or the source has one of the filters as a prefix.
func (filters Args) FuzzyMatch(field, source string) bool {
if filters.ExactMatch(field, source) {
return true
}
fieldValues := filters.fields[field]
for prefix := range fieldValues {
if strings.HasPrefix(source, prefix) {
return true
}
}
return false
}
// Include returns true if the name of the field to filter is in the filters.
func (filters Args) Include(field string) bool {
_, ok := filters.fields[field]
return ok
}
// Validate ensures that all the fields in the filter are valid.
// It returns an error as soon as it finds an invalid field.
func (filters Args) Validate(accepted map[string]bool) error {
for name := range filters.fields {
if !accepted[name] {
return fmt.Errorf("Invalid filter '%s'", name)
}
}
return nil
}
// WalkValues iterates over the list of filtered values for a field.
// It stops the iteration if it finds an error and it returns that error.
func (filters Args) WalkValues(field string, op func(value string) error) error {
if _, ok := filters.fields[field]; !ok {
return nil
}
for v := range filters.fields[field] {
if err := op(v); err != nil {
return err
}
}
return nil
}
func deprecatedArgs(d map[string][]string) map[string]map[string]bool {
m := map[string]map[string]bool{}
for k, v := range d {
values := map[string]bool{}
for _, vv := range v {
values[vv] = true
}
m[k] = values
}
return m
}
func convertArgsToSlice(f map[string]map[string]bool) map[string][]string {
m := map[string][]string{}
for k, v := range f {
values := []string{}
for kk := range v {
if v[kk] {
values = append(values, kk)
}
}
m[k] = values
}
return m
}
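
A minimal sketch of the typical Args lifecycle: build the filters on the client, serialize with ToParam for transport, then rebuild with FromParam on the receiving side; the filter values are illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs()
	args.Add("label", "env=dev")
	args.Add("label", "tier=web")
	args.Add("status", "running")

	// Serialize for transport, e.g. as a URL query parameter.
	param, err := filters.ToParam(args)
	if err != nil {
		panic(err)
	}
	fmt.Println(param)

	// Rebuild on the receiving side.
	parsed, err := filters.FromParam(param)
	if err != nil {
		panic(err)
	}
	fmt.Println(parsed.Get("label"))                    // both label values, unordered
	fmt.Println(parsed.ExactMatch("status", "running")) // true
}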


@@ -0,0 +1,17 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// GraphDriverData Information about a container's graph driver.
// swagger:model GraphDriverData
type GraphDriverData struct {
// data
// Required: true
Data map[string]string `json:"Data"`
// name
// Required: true
Name string `json:"Name"`
}


@@ -0,0 +1,13 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// IDResponse Response to an API call that returns just an Id
// swagger:model IdResponse
type IDResponse struct {
// The id of the newly created object.
// Required: true
ID string `json:"Id"`
}


@@ -0,0 +1,15 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ImageDeleteResponseItem image delete response item
// swagger:model ImageDeleteResponseItem
type ImageDeleteResponseItem struct {
// The image ID of an image that was deleted
Deleted string `json:"Deleted,omitempty"`
// The image ID of an image that was untagged
Untagged string `json:"Untagged,omitempty"`
}


@@ -0,0 +1,49 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ImageSummary image summary
// swagger:model ImageSummary
type ImageSummary struct {
// containers
// Required: true
Containers int64 `json:"Containers"`
// created
// Required: true
Created int64 `json:"Created"`
// Id
// Required: true
ID string `json:"Id"`
// labels
// Required: true
Labels map[string]string `json:"Labels"`
// parent Id
// Required: true
ParentID string `json:"ParentId"`
// repo digests
// Required: true
RepoDigests []string `json:"RepoDigests"`
// repo tags
// Required: true
RepoTags []string `json:"RepoTags"`
// shared size
// Required: true
SharedSize int64 `json:"SharedSize"`
// size
// Required: true
Size int64 `json:"Size"`
// virtual size
// Required: true
VirtualSize int64 `json:"VirtualSize"`
}


@@ -0,0 +1,113 @@
package mount
import (
"os"
)
// Type represents the type of a mount.
type Type string
// Type constants
const (
// TypeBind is the type for mounting host dir
TypeBind Type = "bind"
// TypeVolume is the type for remote storage volumes
TypeVolume Type = "volume"
// TypeTmpfs is the type for mounting tmpfs
TypeTmpfs Type = "tmpfs"
)
// Mount represents a mount (volume).
type Mount struct {
Type Type `json:",omitempty"`
// Source specifies the name of the mount. Depending on mount type, this
// may be a volume name or a host path, or even ignored.
// Source is not supported for tmpfs (must be an empty value)
Source string `json:",omitempty"`
Target string `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
BindOptions *BindOptions `json:",omitempty"`
VolumeOptions *VolumeOptions `json:",omitempty"`
TmpfsOptions *TmpfsOptions `json:",omitempty"`
}
// Propagation represents the propagation of a mount.
type Propagation string
const (
// PropagationRPrivate RPRIVATE
PropagationRPrivate Propagation = "rprivate"
// PropagationPrivate PRIVATE
PropagationPrivate Propagation = "private"
// PropagationRShared RSHARED
PropagationRShared Propagation = "rshared"
// PropagationShared SHARED
PropagationShared Propagation = "shared"
// PropagationRSlave RSLAVE
PropagationRSlave Propagation = "rslave"
// PropagationSlave SLAVE
PropagationSlave Propagation = "slave"
)
// Propagations is the list of all valid mount propagations
var Propagations = []Propagation{
PropagationRPrivate,
PropagationPrivate,
PropagationRShared,
PropagationShared,
PropagationRSlave,
PropagationSlave,
}
// BindOptions defines options specific to mounts of type "bind".
type BindOptions struct {
Propagation Propagation `json:",omitempty"`
}
// VolumeOptions represents the options for a mount of type volume.
type VolumeOptions struct {
NoCopy bool `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
DriverConfig *Driver `json:",omitempty"`
}
// Driver represents a volume driver.
type Driver struct {
Name string `json:",omitempty"`
Options map[string]string `json:",omitempty"`
}
// TmpfsOptions defines options specific to mounts of type "tmpfs".
type TmpfsOptions struct {
// Size sets the size of the tmpfs, in bytes.
//
// This will be converted to an operating system specific value
// depending on the host. For example, on linux, it will be converted to
// use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with
// docker, uses a straight byte value.
//
// Percentages are not supported.
SizeBytes int64 `json:",omitempty"`
// Mode of the tmpfs upon creation
Mode os.FileMode `json:",omitempty"`
// TODO(stevvooe): There are several more tmpfs flags, specified in the
// daemon, that are accepted. Only the most basic are added for now.
//
// From docker/docker/pkg/mount/flags.go:
//
// var validFlags = map[string]bool{
// "": true,
// "size": true, X
// "mode": true, X
// "uid": true,
// "gid": true,
// "nr_inodes": true,
// "nr_blocks": true,
// "mpol": true,
// }
//
// Some of these may be straightforward to add, but others, such as
// uid/gid have implications in a clustered system.
}
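
To make the option types concrete, here is an illustrative sketch (not part of the vendored file) that builds one bind mount and one tmpfs mount and prints the JSON a client would send:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	mounts := []mount.Mount{
		{
			Type:        mount.TypeBind,
			Source:      "/var/data", // host path for bind mounts
			Target:      "/data",
			ReadOnly:    true,
			BindOptions: &mount.BindOptions{Propagation: mount.PropagationRPrivate},
		},
		{
			Type:   mount.TypeTmpfs, // Source must stay empty for tmpfs
			Target: "/scratch",
			TmpfsOptions: &mount.TmpfsOptions{
				SizeBytes: 64 * 1024 * 1024, // rendered as size=64m on Linux
				Mode:      0700,
			},
		},
	}
	out, _ := json.MarshalIndent(mounts, "", "  ")
	fmt.Println(string(out))
}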


@@ -0,0 +1,86 @@
package network
// Address represents an IP address
type Address struct {
Addr string
PrefixLen int
}
// IPAM represents IP Address Management
type IPAM struct {
Driver string
Options map[string]string // Per-network IPAM driver options
Config []IPAMConfig
}
// IPAMConfig represents IPAM configurations
type IPAMConfig struct {
Subnet string `json:",omitempty"`
IPRange string `json:",omitempty"`
Gateway string `json:",omitempty"`
AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"`
}
// EndpointIPAMConfig represents IPAM configurations for the endpoint
type EndpointIPAMConfig struct {
IPv4Address string `json:",omitempty"`
IPv6Address string `json:",omitempty"`
LinkLocalIPs []string `json:",omitempty"`
}
// Copy makes a copy of the endpoint ipam config
func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig {
cfgCopy := *cfg
cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs))
cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...)
return &cfgCopy
}
// PeerInfo represents one peer of an overlay network
type PeerInfo struct {
Name string
IP string
}
// EndpointSettings stores the network endpoint details
type EndpointSettings struct {
// Configurations
IPAMConfig *EndpointIPAMConfig
Links []string
Aliases []string
// Operational data
NetworkID string
EndpointID string
Gateway string
IPAddress string
IPPrefixLen int
IPv6Gateway string
GlobalIPv6Address string
GlobalIPv6PrefixLen int
MacAddress string
}
// Copy makes a deep copy of `EndpointSettings`
func (es *EndpointSettings) Copy() *EndpointSettings {
epCopy := *es
if es.IPAMConfig != nil {
epCopy.IPAMConfig = es.IPAMConfig.Copy()
}
if es.Links != nil {
links := make([]string, 0, len(es.Links))
epCopy.Links = append(links, es.Links...)
}
if es.Aliases != nil {
aliases := make([]string, 0, len(es.Aliases))
epCopy.Aliases = append(aliases, es.Aliases...)
}
return &epCopy
}
// NetworkingConfig represents the container's networking configuration for each of its interfaces
// Carries the networking configs specified in the `docker run` and `docker network connect` commands
type NetworkingConfig struct {
EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network
}
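
An illustrative check (not part of the vendored file) that EndpointSettings.Copy detaches the nested IPAM config and the slices from the original, so mutating the copy is safe:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/network"
)

func main() {
	orig := &network.EndpointSettings{
		IPAMConfig: &network.EndpointIPAMConfig{IPv4Address: "10.0.0.2"},
		Aliases:    []string{"db"},
	}
	cp := orig.Copy()

	// Mutations of the copy must not leak back into the original.
	cp.Aliases[0] = "cache"
	cp.IPAMConfig.IPv4Address = "10.0.0.9"

	fmt.Println(orig.Aliases[0], orig.IPAMConfig.IPv4Address) // db 10.0.0.2
}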

189
vendor/github.com/docker/docker/api/types/plugin.go generated vendored Normal file

@@ -0,0 +1,189 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Plugin A plugin for the Engine API
// swagger:model Plugin
type Plugin struct {
// config
// Required: true
Config PluginConfig `json:"Config"`
// True when the plugin is running. False when the plugin is not running, only installed.
// Required: true
Enabled bool `json:"Enabled"`
// Id
ID string `json:"Id,omitempty"`
// name
// Required: true
Name string `json:"Name"`
// plugin remote reference used to push/pull the plugin
PluginReference string `json:"PluginReference,omitempty"`
// settings
// Required: true
Settings PluginSettings `json:"Settings"`
}
// PluginConfig The config of a plugin.
// swagger:model PluginConfig
type PluginConfig struct {
// args
// Required: true
Args PluginConfigArgs `json:"Args"`
// description
// Required: true
Description string `json:"Description"`
// documentation
// Required: true
Documentation string `json:"Documentation"`
// entrypoint
// Required: true
Entrypoint []string `json:"Entrypoint"`
// env
// Required: true
Env []PluginEnv `json:"Env"`
// interface
// Required: true
Interface PluginConfigInterface `json:"Interface"`
// linux
// Required: true
Linux PluginConfigLinux `json:"Linux"`
// mounts
// Required: true
Mounts []PluginMount `json:"Mounts"`
// network
// Required: true
Network PluginConfigNetwork `json:"Network"`
// propagated mount
// Required: true
PropagatedMount string `json:"PropagatedMount"`
// user
User PluginConfigUser `json:"User,omitempty"`
// work dir
// Required: true
WorkDir string `json:"WorkDir"`
// rootfs
Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"`
}
// PluginConfigArgs plugin config args
// swagger:model PluginConfigArgs
type PluginConfigArgs struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// settable
// Required: true
Settable []string `json:"Settable"`
// value
// Required: true
Value []string `json:"Value"`
}
// PluginConfigInterface The interface between Docker and the plugin
// swagger:model PluginConfigInterface
type PluginConfigInterface struct {
// socket
// Required: true
Socket string `json:"Socket"`
// types
// Required: true
Types []PluginInterfaceType `json:"Types"`
}
// PluginConfigLinux plugin config linux
// swagger:model PluginConfigLinux
type PluginConfigLinux struct {
// allow all devices
// Required: true
AllowAllDevices bool `json:"AllowAllDevices"`
// capabilities
// Required: true
Capabilities []string `json:"Capabilities"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
}
// PluginConfigNetwork plugin config network
// swagger:model PluginConfigNetwork
type PluginConfigNetwork struct {
// type
// Required: true
Type string `json:"Type"`
}
// PluginConfigRootfs plugin config rootfs
// swagger:model PluginConfigRootfs
type PluginConfigRootfs struct {
// diff ids
DiffIds []string `json:"diff_ids"`
// type
Type string `json:"type,omitempty"`
}
// PluginConfigUser plugin config user
// swagger:model PluginConfigUser
type PluginConfigUser struct {
// g ID
GID uint32 `json:"GID,omitempty"`
// UID
UID uint32 `json:"UID,omitempty"`
}
// PluginSettings Settings that can be modified by users.
// swagger:model PluginSettings
type PluginSettings struct {
// args
// Required: true
Args []string `json:"Args"`
// devices
// Required: true
Devices []PluginDevice `json:"Devices"`
// env
// Required: true
Env []string `json:"Env"`
// mounts
// Required: true
Mounts []PluginMount `json:"Mounts"`
}


@@ -0,0 +1,25 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginDevice plugin device
// swagger:model PluginDevice
type PluginDevice struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// path
// Required: true
Path *string `json:"Path"`
// settable
// Required: true
Settable []string `json:"Settable"`
}


@@ -0,0 +1,25 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginEnv plugin env
// swagger:model PluginEnv
type PluginEnv struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// settable
// Required: true
Settable []string `json:"Settable"`
// value
// Required: true
Value *string `json:"Value"`
}


@@ -0,0 +1,21 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginInterfaceType plugin interface type
// swagger:model PluginInterfaceType
type PluginInterfaceType struct {
// capability
// Required: true
Capability string `json:"Capability"`
// prefix
// Required: true
Prefix string `json:"Prefix"`
// version
// Required: true
Version string `json:"Version"`
}


@@ -0,0 +1,37 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// PluginMount plugin mount
// swagger:model PluginMount
type PluginMount struct {
// description
// Required: true
Description string `json:"Description"`
// destination
// Required: true
Destination string `json:"Destination"`
// name
// Required: true
Name string `json:"Name"`
// options
// Required: true
Options []string `json:"Options"`
// settable
// Required: true
Settable []string `json:"Settable"`
// source
// Required: true
Source *string `json:"Source"`
// type
// Required: true
Type string `json:"Type"`
}


@@ -0,0 +1,79 @@
package types
import (
"encoding/json"
"fmt"
"sort"
)
// PluginsListResponse contains the response for the Engine API
type PluginsListResponse []*Plugin
const (
authzDriver = "AuthzDriver"
graphDriver = "GraphDriver"
ipamDriver = "IpamDriver"
networkDriver = "NetworkDriver"
volumeDriver = "VolumeDriver"
)
// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType
func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error {
versionIndex := len(p)
prefixIndex := 0
if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' {
return fmt.Errorf("%q is not a plugin interface type", p)
}
p = p[1 : len(p)-1]
loop:
for i, b := range p {
switch b {
case '.':
prefixIndex = i
case '/':
versionIndex = i
break loop
}
}
t.Prefix = string(p[:prefixIndex])
t.Capability = string(p[prefixIndex+1 : versionIndex])
if versionIndex < len(p) {
t.Version = string(p[versionIndex+1:])
}
return nil
}
// MarshalJSON implements json.Marshaler for PluginInterfaceType
func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) {
return json.Marshal(t.String())
}
// String implements fmt.Stringer for PluginInterfaceType
func (t PluginInterfaceType) String() string {
return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version)
}
// PluginPrivilege describes a permission the user has to accept
// upon installing a plugin.
type PluginPrivilege struct {
Name string
Description string
Value []string
}
// PluginPrivileges is a list of PluginPrivilege
type PluginPrivileges []PluginPrivilege
func (s PluginPrivileges) Len() int {
return len(s)
}
func (s PluginPrivileges) Less(i, j int) bool {
return s[i].Name < s[j].Name
}
func (s PluginPrivileges) Swap(i, j int) {
sort.Strings(s[i].Value)
sort.Strings(s[j].Value)
s[i], s[j] = s[j], s[i]
}
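
A round-trip sketch (illustrative, not vendored) showing how UnmarshalJSON splits the "prefix.capability/version" form and how String reassembles it:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	var t types.PluginInterfaceType
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0

	out, _ := json.Marshal(&t) // MarshalJSON re-emits the compact form
	fmt.Println(string(out))   // "docker.volumedriver/1.0"
}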

23
vendor/github.com/docker/docker/api/types/port.go generated vendored Normal file

@@ -0,0 +1,23 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Port An open port on a container
// swagger:model Port
type Port struct {
// IP
IP string `json:"IP,omitempty"`
// Port on the container
// Required: true
PrivatePort uint16 `json:"PrivatePort"`
// Port exposed on the host
PublicPort uint16 `json:"PublicPort,omitempty"`
// type
// Required: true
Type string `json:"Type"`
}


@@ -0,0 +1,21 @@
package registry
// ----------------------------------------------------------------------------
// DO NOT EDIT THIS FILE
// This file was generated by `swagger generate operation`
//
// See hack/generate-swagger-api.sh
// ----------------------------------------------------------------------------
// AuthenticateOKBody authenticate o k body
// swagger:model AuthenticateOKBody
type AuthenticateOKBody struct {
// An opaque token used to authenticate a user after a successful login
// Required: true
IdentityToken string `json:"IdentityToken"`
// The status of the authentication
// Required: true
Status string `json:"Status"`
}


@@ -0,0 +1,104 @@
package registry
import (
"encoding/json"
"net"
)
// ServiceConfig stores daemon registry services configuration.
type ServiceConfig struct {
InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"`
IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"`
Mirrors []string
}
// NetIPNet is the net.IPNet type, which can be marshalled to and
// unmarshalled from JSON
type NetIPNet net.IPNet
// String returns the CIDR notation of ipnet
func (ipnet *NetIPNet) String() string {
return (*net.IPNet)(ipnet).String()
}
// MarshalJSON returns the JSON representation of the IPNet
func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) {
return json.Marshal((*net.IPNet)(ipnet).String())
}
// UnmarshalJSON sets the IPNet from a byte array of JSON
func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) {
var ipnetStr string
if err = json.Unmarshal(b, &ipnetStr); err == nil {
var cidr *net.IPNet
if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil {
*ipnet = NetIPNet(*cidr)
}
}
return
}
// IndexInfo contains information about a registry
//
// RepositoryInfo Examples:
// {
// "Index" : {
// "Name" : "docker.io",
// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"],
// "Secure" : true,
// "Official" : true,
// },
// "RemoteName" : "library/debian",
// "LocalName" : "debian",
// "CanonicalName" : "docker.io/debian"
// "Official" : true,
// }
//
// {
// "Index" : {
// "Name" : "127.0.0.1:5000",
// "Mirrors" : [],
// "Secure" : false,
// "Official" : false,
// },
// "RemoteName" : "user/repo",
// "LocalName" : "127.0.0.1:5000/user/repo",
// "CanonicalName" : "127.0.0.1:5000/user/repo",
// "Official" : false,
// }
type IndexInfo struct {
// Name is the name of the registry, such as "docker.io"
Name string
// Mirrors is a list of mirrors, expressed as URIs
Mirrors []string
// Secure is set to false if the registry is part of the list of
// insecure registries. Insecure registries accept HTTP and/or
// HTTPS with certificates from unknown CAs.
Secure bool
// Official indicates whether this is an official registry
Official bool
}
// SearchResult describes a search result returned from a registry
type SearchResult struct {
// StarCount indicates the number of stars this repository has
StarCount int `json:"star_count"`
// IsOfficial is true if the result is from an official repository.
IsOfficial bool `json:"is_official"`
// Name is the name of the repository
Name string `json:"name"`
// IsAutomated indicates whether the result is automated
IsAutomated bool `json:"is_automated"`
// Description is a textual description of the repository
Description string `json:"description"`
}
// SearchResults lists a collection of search results returned from a registry
type SearchResults struct {
// Query contains the query string that generated the search results
Query string `json:"query"`
// NumResults indicates the number of results the query returned
NumResults int `json:"num_results"`
// Results is a slice containing the actual results for the search
Results []SearchResult `json:"results"`
}
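
An illustrative round-trip (not part of the vendored file) through NetIPNet's JSON methods, which carry the network as a CIDR string:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/registry"
)

func main() {
	var n registry.NetIPNet
	if err := json.Unmarshal([]byte(`"10.0.0.0/8"`), &n); err != nil {
		panic(err)
	}
	fmt.Println(n.String()) // 10.0.0.0/8

	out, _ := json.Marshal(&n)
	fmt.Println(string(out)) // "10.0.0.0/8"
}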

93
vendor/github.com/docker/docker/api/types/seccomp.go generated vendored Normal file

@@ -0,0 +1,93 @@
package types
// Seccomp represents the config for a seccomp profile for syscall restriction.
type Seccomp struct {
DefaultAction Action `json:"defaultAction"`
// Architectures is kept to maintain backward compatibility with the old
// seccomp profile.
Architectures []Arch `json:"architectures,omitempty"`
ArchMap []Architecture `json:"archMap,omitempty"`
Syscalls []*Syscall `json:"syscalls"`
}
// Architecture is used to represent a specific architecture
// and its sub-architectures
type Architecture struct {
Arch Arch `json:"architecture"`
SubArches []Arch `json:"subArchitectures"`
}
// Arch used for architectures
type Arch string
// Additional architectures permitted to be used for system calls
// By default only the native architecture of the kernel is permitted
const (
ArchX86 Arch = "SCMP_ARCH_X86"
ArchX86_64 Arch = "SCMP_ARCH_X86_64"
ArchX32 Arch = "SCMP_ARCH_X32"
ArchARM Arch = "SCMP_ARCH_ARM"
ArchAARCH64 Arch = "SCMP_ARCH_AARCH64"
ArchMIPS Arch = "SCMP_ARCH_MIPS"
ArchMIPS64 Arch = "SCMP_ARCH_MIPS64"
ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32"
ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL"
ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64"
ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32"
ArchPPC Arch = "SCMP_ARCH_PPC"
ArchPPC64 Arch = "SCMP_ARCH_PPC64"
ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE"
ArchS390 Arch = "SCMP_ARCH_S390"
ArchS390X Arch = "SCMP_ARCH_S390X"
)
// Action taken upon Seccomp rule match
type Action string
// Define actions for Seccomp rules
const (
ActKill Action = "SCMP_ACT_KILL"
ActTrap Action = "SCMP_ACT_TRAP"
ActErrno Action = "SCMP_ACT_ERRNO"
ActTrace Action = "SCMP_ACT_TRACE"
ActAllow Action = "SCMP_ACT_ALLOW"
)
// Operator used to match syscall arguments in Seccomp
type Operator string
// Define operators for syscall arguments in Seccomp
const (
OpNotEqual Operator = "SCMP_CMP_NE"
OpLessThan Operator = "SCMP_CMP_LT"
OpLessEqual Operator = "SCMP_CMP_LE"
OpEqualTo Operator = "SCMP_CMP_EQ"
OpGreaterEqual Operator = "SCMP_CMP_GE"
OpGreaterThan Operator = "SCMP_CMP_GT"
OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ"
)
// Arg used for matching specific syscall arguments in Seccomp
type Arg struct {
Index uint `json:"index"`
Value uint64 `json:"value"`
ValueTwo uint64 `json:"valueTwo"`
Op Operator `json:"op"`
}
// Filter is used to conditionally apply Seccomp rules
type Filter struct {
Caps []string `json:"caps,omitempty"`
Arches []string `json:"arches,omitempty"`
}
// Syscall is used to match a group of syscalls in Seccomp
type Syscall struct {
Name string `json:"name,omitempty"`
Names []string `json:"names,omitempty"`
Action Action `json:"action"`
Args []*Arg `json:"args"`
Comment string `json:"comment"`
Includes Filter `json:"includes"`
Excludes Filter `json:"excludes"`
}
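
A minimal illustrative profile (not vendored) built from these types: deny every syscall by default, allow two outright, and allow a third only with a specific first argument:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	profile := types.Seccomp{
		DefaultAction: types.ActErrno, // deny everything not matched below
		Architectures: []types.Arch{types.ArchX86_64},
		Syscalls: []*types.Syscall{
			{Names: []string{"read", "write"}, Action: types.ActAllow},
			{
				Name:   "personality",
				Action: types.ActAllow,
				// Only permit personality(0x0).
				Args: []*types.Arg{{Index: 0, Value: 0, Op: types.OpEqualTo}},
			},
		},
	}
	out, _ := json.MarshalIndent(profile, "", "  ")
	fmt.Println(string(out))
}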


@@ -0,0 +1,12 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// ServiceUpdateResponse service update response
// swagger:model ServiceUpdateResponse
type ServiceUpdateResponse struct {
// Optional warning messages
Warnings []string `json:"Warnings"`
}

178
vendor/github.com/docker/docker/api/types/stats.go generated vendored Normal file

@@ -0,0 +1,178 @@
// Package types provides stable API types for the responses returned to
// consumers of the API stats endpoint.
package types
import "time"
// ThrottlingData stores CPU throttling stats of one running container.
// Not used on Windows.
type ThrottlingData struct {
// Number of periods with throttling active
Periods uint64 `json:"periods"`
// Number of periods when the container hits its throttling limit.
ThrottledPeriods uint64 `json:"throttled_periods"`
// Aggregate time the container was throttled for in nanoseconds.
ThrottledTime uint64 `json:"throttled_time"`
}
// CPUUsage stores all CPU stats aggregated since container inception.
type CPUUsage struct {
// Total CPU time consumed.
// Units: nanoseconds (Linux)
// Units: 100's of nanoseconds (Windows)
TotalUsage uint64 `json:"total_usage"`
// Total CPU time consumed per core (Linux). Not used on Windows.
// Units: nanoseconds.
PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
// Time spent by tasks of the cgroup in kernel mode (Linux).
// Time spent by all container processes in kernel mode (Windows).
// Units: nanoseconds (Linux).
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
// Time spent by tasks of the cgroup in user mode (Linux).
// Time spent by all container processes in user mode (Windows).
// Units: nanoseconds (Linux).
// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers
UsageInUsermode uint64 `json:"usage_in_usermode"`
}
// CPUStats aggregates and wraps all CPU related info of container
type CPUStats struct {
// CPU Usage. Linux and Windows.
CPUUsage CPUUsage `json:"cpu_usage"`
// System Usage. Linux only.
SystemUsage uint64 `json:"system_cpu_usage,omitempty"`
// Throttling Data. Linux only.
ThrottlingData ThrottlingData `json:"throttling_data,omitempty"`
}
// MemoryStats aggregates all memory stats since container inception on Linux.
// Windows returns stats for commit and private working set only.
type MemoryStats struct {
// Linux Memory Stats
// current res_counter usage for memory
Usage uint64 `json:"usage,omitempty"`
// maximum usage ever recorded.
MaxUsage uint64 `json:"max_usage,omitempty"`
// TODO(vishh): Export these as stronger types.
// all the stats exported via memory.stat.
Stats map[string]uint64 `json:"stats,omitempty"`
// number of times memory usage hits limits.
Failcnt uint64 `json:"failcnt,omitempty"`
Limit uint64 `json:"limit,omitempty"`
// Windows Memory Stats
// See https://technet.microsoft.com/en-us/magazine/ff382715.aspx
// committed bytes
Commit uint64 `json:"commitbytes,omitempty"`
// peak committed bytes
CommitPeak uint64 `json:"commitpeakbytes,omitempty"`
// private working set
PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"`
}
// BlkioStatEntry is one small entity to store a piece of Blkio stats
// Not used on Windows.
type BlkioStatEntry struct {
Major uint64 `json:"major"`
Minor uint64 `json:"minor"`
Op string `json:"op"`
Value uint64 `json:"value"`
}
// BlkioStats stores all I/O service stats for data read and write.
// This is a Linux-specific structure: the differences between expressing
// block I/O on Windows and Linux are significant enough that attempting
// to morph them into a combined structure would make little sense.
type BlkioStats struct {
// number of bytes transferred to and from the block device
IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"`
IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"`
IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"`
IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"`
IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"`
IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"`
IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"`
SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"`
}
// StorageStats is the disk I/O stats for read/write on Windows.
type StorageStats struct {
ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"`
ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"`
WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"`
WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"`
}
// NetworkStats aggregates the network stats of one container
type NetworkStats struct {
// Bytes received. Windows and Linux.
RxBytes uint64 `json:"rx_bytes"`
// Packets received. Windows and Linux.
RxPackets uint64 `json:"rx_packets"`
// Received errors. Not used on Windows. Note that we don't `omitempty` this
// field as it is expected in the >=v1.21 API stats structure.
RxErrors uint64 `json:"rx_errors"`
// Incoming packets dropped. Windows and Linux.
RxDropped uint64 `json:"rx_dropped"`
// Bytes sent. Windows and Linux.
TxBytes uint64 `json:"tx_bytes"`
// Packets sent. Windows and Linux.
TxPackets uint64 `json:"tx_packets"`
// Sent errors. Not used on Windows. Note that we don't `omitempty` this
// field as it is expected in the >=v1.21 API stats structure.
TxErrors uint64 `json:"tx_errors"`
// Outgoing packets dropped. Windows and Linux.
TxDropped uint64 `json:"tx_dropped"`
// Endpoint ID. Not used on Linux.
EndpointID string `json:"endpoint_id,omitempty"`
// Instance ID. Not used on Linux.
InstanceID string `json:"instance_id,omitempty"`
}
// PidsStats contains the stats of a container's pids
type PidsStats struct {
// Current is the number of pids in the cgroup
Current uint64 `json:"current,omitempty"`
// Limit is the hard limit on the number of pids in the cgroup.
// A "Limit" of 0 means that there is no limit.
Limit uint64 `json:"limit,omitempty"`
}
// Stats is the top-level struct aggregating all types of stats for one container
type Stats struct {
// Common stats
Read time.Time `json:"read"`
PreRead time.Time `json:"preread"`
// Linux specific stats, not populated on Windows.
PidsStats PidsStats `json:"pids_stats,omitempty"`
BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
// Windows specific stats, not populated on Linux.
NumProcs uint32 `json:"num_procs"`
StorageStats StorageStats `json:"storage_stats,omitempty"`
// Shared stats
CPUStats CPUStats `json:"cpu_stats,omitempty"`
PreCPUStats CPUStats `json:"precpu_stats,omitempty"` // "Pre"="Previous"
MemoryStats MemoryStats `json:"memory_stats,omitempty"`
}
// StatsJSON extends Stats with the container's name and ID, plus per-network stats
type StatsJSON struct {
Stats
Name string `json:"name,omitempty"`
ID string `json:"id,omitempty"`
// Networks request version >=1.21
Networks map[string]NetworkStats `json:"networks,omitempty"`
}
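
Because the stats endpoint reports cumulative counters together with the previous sample (PreCPUStats), clients derive rates by diffing the two. Below is a sketch of the usual Linux CPU-percentage calculation; it is illustrative client-side code, not part of this file:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// cpuPercent derives a CPU usage percentage from one stats message by
// diffing the current counters against the embedded PreCPUStats snapshot.
func cpuPercent(s types.StatsJSON) float64 {
	cpuDelta := float64(s.CPUStats.CPUUsage.TotalUsage - s.PreCPUStats.CPUUsage.TotalUsage)
	sysDelta := float64(s.CPUStats.SystemUsage - s.PreCPUStats.SystemUsage)
	if cpuDelta > 0 && sysDelta > 0 {
		return cpuDelta / sysDelta * float64(len(s.CPUStats.CPUUsage.PercpuUsage)) * 100.0
	}
	return 0.0
}

func main() {
	var s types.StatsJSON // normally decoded from GET /containers/{id}/stats
	s.CPUStats.CPUUsage.TotalUsage = 400
	s.CPUStats.CPUUsage.PercpuUsage = []uint64{200, 200}
	s.CPUStats.SystemUsage = 1000
	fmt.Printf("%.1f%%\n", cpuPercent(s)) // 80.0%
}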


@@ -0,0 +1,30 @@
package strslice
import "encoding/json"
// StrSlice represents a string or an array of strings.
// We need to override the json decoder to accept both options.
type StrSlice []string
// UnmarshalJSON decodes the byte slice whether it's a string or an array of
// strings. This method is needed to implement json.Unmarshaler.
func (e *StrSlice) UnmarshalJSON(b []byte) error {
if len(b) == 0 {
// With no input, we preserve the existing value by returning nil and
// leaving the target alone. This allows defining default values for
// the type.
return nil
}
p := make([]string, 0, 1)
if err := json.Unmarshal(b, &p); err != nil {
var s string
if err := json.Unmarshal(b, &s); err != nil {
return err
}
p = append(p, s)
}
*e = p
return nil
}
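
A quick illustrative demo (not vendored) of the two input shapes StrSlice accepts:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types/strslice"
)

func main() {
	var a, b strslice.StrSlice
	_ = json.Unmarshal([]byte(`"/bin/sh -c date"`), &a)         // bare string
	_ = json.Unmarshal([]byte(`["/bin/sh", "-c", "date"]`), &b) // array form
	fmt.Printf("%q\n", a) // ["/bin/sh -c date"]
	fmt.Printf("%q\n", b) // ["/bin/sh" "-c" "date"]
}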


@@ -0,0 +1,27 @@
package swarm
import "time"
// Version represents the internal object version.
type Version struct {
Index uint64 `json:",omitempty"`
}
// Meta is a base object inherited by most of the other ones.
type Meta struct {
Version Version `json:",omitempty"`
CreatedAt time.Time `json:",omitempty"`
UpdatedAt time.Time `json:",omitempty"`
}
// Annotations represents how to describe an object.
type Annotations struct {
Name string `json:",omitempty"`
Labels map[string]string `json:"Labels"`
}
// Driver represents a driver (network, logging).
type Driver struct {
Name string `json:",omitempty"`
Options map[string]string `json:",omitempty"`
}


@@ -0,0 +1,47 @@
package swarm
import (
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
)
// DNSConfig specifies DNS-related configuration in the resolver configuration file (resolv.conf)
// Detailed documentation is available at:
// http://man7.org/linux/man-pages/man5/resolv.conf.5.html
// `nameserver`, `search`, and `options` are supported.
// TODO: `domain` is not supported yet.
type DNSConfig struct {
// Nameservers specifies the IP addresses of the name servers
Nameservers []string `json:",omitempty"`
// Search specifies the search list for host-name lookup
Search []string `json:",omitempty"`
// Options allows certain internal resolver variables to be modified
Options []string `json:",omitempty"`
}
// ContainerSpec represents the spec of a container.
type ContainerSpec struct {
Image string `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
Command []string `json:",omitempty"`
Args []string `json:",omitempty"`
Hostname string `json:",omitempty"`
Env []string `json:",omitempty"`
Dir string `json:",omitempty"`
User string `json:",omitempty"`
Groups []string `json:",omitempty"`
TTY bool `json:",omitempty"`
OpenStdin bool `json:",omitempty"`
ReadOnly bool `json:",omitempty"`
Mounts []mount.Mount `json:",omitempty"`
StopGracePeriod *time.Duration `json:",omitempty"`
Healthcheck *container.HealthConfig `json:",omitempty"`
// The format of extra hosts on swarmkit is specified in:
// http://man7.org/linux/man-pages/man5/hosts.5.html
// IP_address canonical_hostname [aliases...]
Hosts []string `json:",omitempty"`
DNSConfig *DNSConfig `json:",omitempty"`
Secrets []*SecretReference `json:",omitempty"`
}


@@ -0,0 +1,111 @@
package swarm
// Endpoint represents an endpoint.
type Endpoint struct {
Spec EndpointSpec `json:",omitempty"`
Ports []PortConfig `json:",omitempty"`
VirtualIPs []EndpointVirtualIP `json:",omitempty"`
}
// EndpointSpec represents the spec of an endpoint.
type EndpointSpec struct {
Mode ResolutionMode `json:",omitempty"`
Ports []PortConfig `json:",omitempty"`
}
// ResolutionMode represents a resolution mode.
type ResolutionMode string
const (
// ResolutionModeVIP VIP
ResolutionModeVIP ResolutionMode = "vip"
// ResolutionModeDNSRR DNSRR
ResolutionModeDNSRR ResolutionMode = "dnsrr"
)
// PortConfig represents the config of a port.
type PortConfig struct {
Name string `json:",omitempty"`
Protocol PortConfigProtocol `json:",omitempty"`
// TargetPort is the port inside the container
TargetPort uint32 `json:",omitempty"`
// PublishedPort is the port on the swarm hosts
PublishedPort uint32 `json:",omitempty"`
// PublishMode is the mode in which port is published
PublishMode PortConfigPublishMode `json:",omitempty"`
}
// PortConfigPublishMode represents the mode in which the port is to
// be published.
type PortConfigPublishMode string
const (
// PortConfigPublishModeIngress is used for ports published
// for ingress load balancing using routing mesh.
PortConfigPublishModeIngress PortConfigPublishMode = "ingress"
// PortConfigPublishModeHost is used for ports published
// for direct host level access on the host where the task is running.
PortConfigPublishModeHost PortConfigPublishMode = "host"
)
// PortConfigProtocol represents the protocol of a port.
type PortConfigProtocol string
const (
// TODO(stevvooe): These should be used generally, not just for PortConfig.
// PortConfigProtocolTCP TCP
PortConfigProtocolTCP PortConfigProtocol = "tcp"
// PortConfigProtocolUDP UDP
PortConfigProtocolUDP PortConfigProtocol = "udp"
)
// EndpointVirtualIP represents the virtual ip of a port.
type EndpointVirtualIP struct {
NetworkID string `json:",omitempty"`
Addr string `json:",omitempty"`
}
// Network represents a network.
type Network struct {
ID string
Meta
Spec NetworkSpec `json:",omitempty"`
DriverState Driver `json:",omitempty"`
IPAMOptions *IPAMOptions `json:",omitempty"`
}
// NetworkSpec represents the spec of a network.
type NetworkSpec struct {
Annotations
DriverConfiguration *Driver `json:",omitempty"`
IPv6Enabled bool `json:",omitempty"`
Internal bool `json:",omitempty"`
Attachable bool `json:",omitempty"`
IPAMOptions *IPAMOptions `json:",omitempty"`
}
// NetworkAttachmentConfig represents the configuration of a network attachment.
type NetworkAttachmentConfig struct {
Target string `json:",omitempty"`
Aliases []string `json:",omitempty"`
}
// NetworkAttachment represents a network attachment.
type NetworkAttachment struct {
Network Network `json:",omitempty"`
Addresses []string `json:",omitempty"`
}
// IPAMOptions represents ipam options.
type IPAMOptions struct {
Driver Driver `json:",omitempty"`
Configs []IPAMConfig `json:",omitempty"`
}
// IPAMConfig represents ipam configuration.
type IPAMConfig struct {
Subnet string `json:",omitempty"`
Range string `json:",omitempty"`
Gateway string `json:",omitempty"`
}

114
vendor/github.com/docker/docker/api/types/swarm/node.go generated vendored Normal file

@@ -0,0 +1,114 @@
package swarm
// Node represents a node.
type Node struct {
ID string
Meta
// Spec defines the desired state of the node as specified by the user.
// The system will honor this and will *never* modify it.
Spec NodeSpec `json:",omitempty"`
// Description encapsulates the properties of the Node as reported by the
// agent.
Description NodeDescription `json:",omitempty"`
// Status provides the current status of the node, as seen by the manager.
Status NodeStatus `json:",omitempty"`
// ManagerStatus provides the current status of the node's manager
// component, if the node is a manager.
ManagerStatus *ManagerStatus `json:",omitempty"`
}
// NodeSpec represents the spec of a node.
type NodeSpec struct {
Annotations
Role NodeRole `json:",omitempty"`
Availability NodeAvailability `json:",omitempty"`
}
// NodeRole represents the role of a node.
type NodeRole string
const (
// NodeRoleWorker WORKER
NodeRoleWorker NodeRole = "worker"
// NodeRoleManager MANAGER
NodeRoleManager NodeRole = "manager"
)
// NodeAvailability represents the availability of a node.
type NodeAvailability string
const (
// NodeAvailabilityActive ACTIVE
NodeAvailabilityActive NodeAvailability = "active"
// NodeAvailabilityPause PAUSE
NodeAvailabilityPause NodeAvailability = "pause"
// NodeAvailabilityDrain DRAIN
NodeAvailabilityDrain NodeAvailability = "drain"
)
// NodeDescription represents the description of a node.
type NodeDescription struct {
Hostname string `json:",omitempty"`
Platform Platform `json:",omitempty"`
Resources Resources `json:",omitempty"`
Engine EngineDescription `json:",omitempty"`
}
// Platform represents the platform (Arch/OS).
type Platform struct {
Architecture string `json:",omitempty"`
OS string `json:",omitempty"`
}
// EngineDescription represents the description of an engine.
type EngineDescription struct {
EngineVersion string `json:",omitempty"`
Labels map[string]string `json:",omitempty"`
Plugins []PluginDescription `json:",omitempty"`
}
// PluginDescription represents the description of an engine plugin.
type PluginDescription struct {
Type string `json:",omitempty"`
Name string `json:",omitempty"`
}
// NodeStatus represents the status of a node.
type NodeStatus struct {
State NodeState `json:",omitempty"`
Message string `json:",omitempty"`
Addr string `json:",omitempty"`
}
// Reachability represents the reachability of a node.
type Reachability string
const (
// ReachabilityUnknown UNKNOWN
ReachabilityUnknown Reachability = "unknown"
// ReachabilityUnreachable UNREACHABLE
ReachabilityUnreachable Reachability = "unreachable"
// ReachabilityReachable REACHABLE
ReachabilityReachable Reachability = "reachable"
)
// ManagerStatus represents the status of a manager.
type ManagerStatus struct {
Leader bool `json:",omitempty"`
Reachability Reachability `json:",omitempty"`
Addr string `json:",omitempty"`
}
// NodeState represents the state of a node.
type NodeState string
const (
// NodeStateUnknown UNKNOWN
NodeStateUnknown NodeState = "unknown"
// NodeStateDown DOWN
NodeStateDown NodeState = "down"
// NodeStateReady READY
NodeStateReady NodeState = "ready"
// NodeStateDisconnected DISCONNECTED
NodeStateDisconnected NodeState = "disconnected"
)


@@ -0,0 +1,31 @@
package swarm
import "os"
// Secret represents a secret.
type Secret struct {
ID string
Meta
Spec SecretSpec
}
// SecretSpec represents a secret specification from a secret in swarm
type SecretSpec struct {
Annotations
Data []byte `json:",omitempty"`
}
// SecretReferenceFileTarget is a file target in a secret reference
type SecretReferenceFileTarget struct {
Name string
UID string
GID string
Mode os.FileMode
}
// SecretReference is a reference to a secret in swarm
type SecretReference struct {
File *SecretReferenceFileTarget
SecretID string
SecretName string
}


@@ -0,0 +1,105 @@
package swarm
import "time"
// Service represents a service.
type Service struct {
ID string
Meta
Spec ServiceSpec `json:",omitempty"`
PreviousSpec *ServiceSpec `json:",omitempty"`
Endpoint Endpoint `json:",omitempty"`
UpdateStatus *UpdateStatus `json:",omitempty"`
}
// ServiceSpec represents the spec of a service.
type ServiceSpec struct {
Annotations
// TaskTemplate defines how the service should construct new tasks when
// orchestrating this service.
TaskTemplate TaskSpec `json:",omitempty"`
Mode ServiceMode `json:",omitempty"`
UpdateConfig *UpdateConfig `json:",omitempty"`
// Networks field in ServiceSpec is deprecated. The
// same field in TaskSpec should be used instead.
// This field will be removed in a future release.
Networks []NetworkAttachmentConfig `json:",omitempty"`
EndpointSpec *EndpointSpec `json:",omitempty"`
}
// ServiceMode represents the mode of a service.
type ServiceMode struct {
Replicated *ReplicatedService `json:",omitempty"`
Global *GlobalService `json:",omitempty"`
}
// UpdateState is the state of a service update.
type UpdateState string
const (
// UpdateStateUpdating is the updating state.
UpdateStateUpdating UpdateState = "updating"
// UpdateStatePaused is the paused state.
UpdateStatePaused UpdateState = "paused"
// UpdateStateCompleted is the completed state.
UpdateStateCompleted UpdateState = "completed"
)
// UpdateStatus reports the status of a service update.
type UpdateStatus struct {
State UpdateState `json:",omitempty"`
StartedAt *time.Time `json:",omitempty"`
CompletedAt *time.Time `json:",omitempty"`
Message string `json:",omitempty"`
}
// ReplicatedService is a kind of ServiceMode.
type ReplicatedService struct {
Replicas *uint64 `json:",omitempty"`
}
// GlobalService is a kind of ServiceMode.
type GlobalService struct{}
const (
// UpdateFailureActionPause PAUSE
UpdateFailureActionPause = "pause"
// UpdateFailureActionContinue CONTINUE
UpdateFailureActionContinue = "continue"
)
// UpdateConfig represents the update configuration.
type UpdateConfig struct {
// Maximum number of tasks to be updated in one iteration.
// 0 means unlimited parallelism.
Parallelism uint64
// Amount of time between updates.
Delay time.Duration `json:",omitempty"`
// FailureAction is the action to take when an update fails.
FailureAction string `json:",omitempty"`
// Monitor indicates how long to monitor a task for failure after it is
// created. If the task fails by ending up in one of the states
// REJECTED, COMPLETED, or FAILED, within Monitor from its creation,
// this counts as a failure. If it fails after Monitor, it does not
// count as a failure. If Monitor is unspecified, a default value will
// be used.
Monitor time.Duration `json:",omitempty"`
// MaxFailureRatio is the fraction of tasks that may fail during
// an update before the failure action is invoked. Any task created by
// the current update which ends up in one of the states REJECTED,
// COMPLETED or FAILED within Monitor from its creation counts as a
// failure. The number of failures is divided by the number of tasks
// being updated, and if this fraction is greater than
// MaxFailureRatio, the failure action is invoked.
//
// If the failure action is CONTINUE, there is no effect.
// If the failure action is PAUSE, no more tasks will be updated until
// another update is started.
MaxFailureRatio float32
}
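
Tying these pieces together, an illustrative ServiceSpec (not vendored) for a replicated service with a cautious rolling-update policy:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	replicas := uint64(3)
	spec := swarm.ServiceSpec{
		Annotations: swarm.Annotations{Name: "web"},
		TaskTemplate: swarm.TaskSpec{
			ContainerSpec: swarm.ContainerSpec{Image: "nginx:1.11"},
		},
		Mode: swarm.ServiceMode{
			Replicated: &swarm.ReplicatedService{Replicas: &replicas},
		},
		UpdateConfig: &swarm.UpdateConfig{
			Parallelism:     1,                // update one task at a time
			Delay:           10 * time.Second, // pause between task updates
			FailureAction:   swarm.UpdateFailureActionPause,
			Monitor:         30 * time.Second, // failure window per task
			MaxFailureRatio: 0.1,              // pause after >10% failures
		},
	}
	fmt.Printf("%+v\n", spec)
}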


@@ -0,0 +1,199 @@
package swarm
import "time"
// ClusterInfo represents info about the cluster for outputting in "info".
// It contains the same information as "Swarm", but without the JoinTokens.
type ClusterInfo struct {
ID string
Meta
Spec Spec
}
// Swarm represents a swarm.
type Swarm struct {
ClusterInfo
JoinTokens JoinTokens
}
// JoinTokens contains the tokens workers and managers need to join the swarm.
type JoinTokens struct {
// Worker is the join token workers may use to join the swarm.
Worker string
// Manager is the join token managers may use to join the swarm.
Manager string
}
// Spec represents the spec of a swarm.
type Spec struct {
Annotations
Orchestration OrchestrationConfig `json:",omitempty"`
Raft RaftConfig `json:",omitempty"`
Dispatcher DispatcherConfig `json:",omitempty"`
CAConfig CAConfig `json:",omitempty"`
TaskDefaults TaskDefaults `json:",omitempty"`
EncryptionConfig EncryptionConfig `json:",omitempty"`
}
// OrchestrationConfig represents orchestration configuration.
type OrchestrationConfig struct {
// TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or
// node. If negative, never remove completed or failed tasks.
TaskHistoryRetentionLimit *int64 `json:",omitempty"`
}
// TaskDefaults parameterizes cluster-level task creation with default values.
type TaskDefaults struct {
// LogDriver selects the log driver to use for tasks created in the
// orchestrator if unspecified by a service.
//
// Updating this value will only have an effect on new tasks. Old tasks
// will continue to use their previously configured log driver until
// recreated.
LogDriver *Driver `json:",omitempty"`
}
// EncryptionConfig controls at-rest encryption of data and keys.
type EncryptionConfig struct {
// AutoLockManagers specifies whether or not managers' TLS keys and raft data
// should be encrypted at rest in such a way that they must be unlocked
// before the manager node starts up again.
AutoLockManagers bool
}
// RaftConfig represents raft configuration.
type RaftConfig struct {
// SnapshotInterval is the number of log entries between snapshots.
SnapshotInterval uint64 `json:",omitempty"`
// KeepOldSnapshots is the number of snapshots to keep beyond the
// current snapshot.
KeepOldSnapshots *uint64 `json:",omitempty"`
// LogEntriesForSlowFollowers is the number of log entries to keep
// around to sync up slow followers after a snapshot is created.
LogEntriesForSlowFollowers uint64 `json:",omitempty"`
// ElectionTick is the number of ticks that a follower will wait for a message
// from the leader before becoming a candidate and starting an election.
// ElectionTick must be greater than HeartbeatTick.
//
// A tick currently defaults to one second, so these translate directly to
// seconds currently, but this is NOT guaranteed.
ElectionTick int
// HeartbeatTick is the number of ticks between heartbeats. Every
// HeartbeatTick ticks, the leader will send a heartbeat to the
// followers.
//
// A tick currently defaults to one second, so these translate directly to
// seconds currently, but this is NOT guaranteed.
HeartbeatTick int
}
// DispatcherConfig represents dispatcher configuration.
type DispatcherConfig struct {
// HeartbeatPeriod defines how often agent should send heartbeats to
// dispatcher.
HeartbeatPeriod time.Duration `json:",omitempty"`
}
// CAConfig represents CA configuration.
type CAConfig struct {
// NodeCertExpiry is the duration certificates should be issued for
NodeCertExpiry time.Duration `json:",omitempty"`
// ExternalCAs is a list of CAs to which a manager node will make
// certificate signing requests for node certificates.
ExternalCAs []*ExternalCA `json:",omitempty"`
}
// ExternalCAProtocol represents type of external CA.
type ExternalCAProtocol string
// ExternalCAProtocolCFSSL CFSSL
const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl"
// ExternalCA defines external CA to be used by the cluster.
type ExternalCA struct {
// Protocol is the protocol used by this external CA.
Protocol ExternalCAProtocol
// URL is the URL where the external CA can be reached.
URL string
// Options is a set of additional key/value pairs whose interpretation
// depends on the specified CA type.
Options map[string]string `json:",omitempty"`
}
// InitRequest is the request used to init a swarm.
type InitRequest struct {
ListenAddr string
AdvertiseAddr string
ForceNewCluster bool
Spec Spec
AutoLockManagers bool
Availability NodeAvailability
}
// JoinRequest is the request used to join a swarm.
type JoinRequest struct {
ListenAddr string
AdvertiseAddr string
RemoteAddrs []string
JoinToken string // accept by secret
Availability NodeAvailability
}
// UnlockRequest is the request used to unlock a swarm.
type UnlockRequest struct {
// UnlockKey is the unlock key in ASCII-armored format.
UnlockKey string
}
// LocalNodeState represents the state of the local node.
type LocalNodeState string
const (
// LocalNodeStateInactive INACTIVE
LocalNodeStateInactive LocalNodeState = "inactive"
// LocalNodeStatePending PENDING
LocalNodeStatePending LocalNodeState = "pending"
// LocalNodeStateActive ACTIVE
LocalNodeStateActive LocalNodeState = "active"
// LocalNodeStateError ERROR
LocalNodeStateError LocalNodeState = "error"
// LocalNodeStateLocked LOCKED
LocalNodeStateLocked LocalNodeState = "locked"
)
// Info represents generic information about swarm.
type Info struct {
NodeID string
NodeAddr string
LocalNodeState LocalNodeState
ControlAvailable bool
Error string
RemoteManagers []Peer
Nodes int
Managers int
Cluster ClusterInfo
}
// Peer represents a peer.
type Peer struct {
NodeID string
Addr string
}
// UpdateFlags contains flags for SwarmUpdate.
type UpdateFlags struct {
RotateWorkerToken bool
RotateManagerToken bool
RotateManagerUnlockKey bool
}

128
vendor/github.com/docker/docker/api/types/swarm/task.go generated vendored Normal file

@@ -0,0 +1,128 @@
package swarm
import "time"
// TaskState represents the state of a task.
type TaskState string
const (
// TaskStateNew NEW
TaskStateNew TaskState = "new"
// TaskStateAllocated ALLOCATED
TaskStateAllocated TaskState = "allocated"
// TaskStatePending PENDING
TaskStatePending TaskState = "pending"
// TaskStateAssigned ASSIGNED
TaskStateAssigned TaskState = "assigned"
// TaskStateAccepted ACCEPTED
TaskStateAccepted TaskState = "accepted"
// TaskStatePreparing PREPARING
TaskStatePreparing TaskState = "preparing"
// TaskStateReady READY
TaskStateReady TaskState = "ready"
// TaskStateStarting STARTING
TaskStateStarting TaskState = "starting"
// TaskStateRunning RUNNING
TaskStateRunning TaskState = "running"
// TaskStateComplete COMPLETE
TaskStateComplete TaskState = "complete"
// TaskStateShutdown SHUTDOWN
TaskStateShutdown TaskState = "shutdown"
// TaskStateFailed FAILED
TaskStateFailed TaskState = "failed"
// TaskStateRejected REJECTED
TaskStateRejected TaskState = "rejected"
)
// Task represents a task.
type Task struct {
ID string
Meta
Annotations
Spec TaskSpec `json:",omitempty"`
ServiceID string `json:",omitempty"`
Slot int `json:",omitempty"`
NodeID string `json:",omitempty"`
Status TaskStatus `json:",omitempty"`
DesiredState TaskState `json:",omitempty"`
NetworksAttachments []NetworkAttachment `json:",omitempty"`
}
// TaskSpec represents the spec of a task.
type TaskSpec struct {
ContainerSpec ContainerSpec `json:",omitempty"`
Resources *ResourceRequirements `json:",omitempty"`
RestartPolicy *RestartPolicy `json:",omitempty"`
Placement *Placement `json:",omitempty"`
Networks []NetworkAttachmentConfig `json:",omitempty"`
// LogDriver specifies the LogDriver to use for tasks created from this
// spec. If not present, the cluster default on swarm.Spec will be
// used, finally falling back to the engine default if neither is specified.
LogDriver *Driver `json:",omitempty"`
// ForceUpdate is a counter that triggers an update even if no relevant
// parameters have been changed.
ForceUpdate uint64
}
// Resources represents resources (CPU/Memory).
type Resources struct {
NanoCPUs int64 `json:",omitempty"`
MemoryBytes int64 `json:",omitempty"`
}
// ResourceRequirements represents resources requirements.
type ResourceRequirements struct {
Limits *Resources `json:",omitempty"`
Reservations *Resources `json:",omitempty"`
}
// Placement represents orchestration parameters.
type Placement struct {
Constraints []string `json:",omitempty"`
}
// RestartPolicy represents the restart policy.
type RestartPolicy struct {
Condition RestartPolicyCondition `json:",omitempty"`
Delay *time.Duration `json:",omitempty"`
MaxAttempts *uint64 `json:",omitempty"`
Window *time.Duration `json:",omitempty"`
}
// RestartPolicyCondition represents when to restart.
type RestartPolicyCondition string
const (
// RestartPolicyConditionNone NONE
RestartPolicyConditionNone RestartPolicyCondition = "none"
// RestartPolicyConditionOnFailure ON_FAILURE
RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure"
// RestartPolicyConditionAny ANY
RestartPolicyConditionAny RestartPolicyCondition = "any"
)
// TaskStatus represents the status of a task.
type TaskStatus struct {
Timestamp time.Time `json:",omitempty"`
State TaskState `json:",omitempty"`
Message string `json:",omitempty"`
Err string `json:",omitempty"`
ContainerStatus ContainerStatus `json:",omitempty"`
PortStatus PortStatus `json:",omitempty"`
}
// ContainerStatus represents the status of a container.
type ContainerStatus struct {
ContainerID string `json:",omitempty"`
PID int `json:",omitempty"`
ExitCode int `json:",omitempty"`
}
// PortStatus represents the status of a task's host ports when its
// service has published host ports
type PortStatus struct {
Ports []PortConfig `json:",omitempty"`
}
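
An illustrative RestartPolicy (not vendored) showing how the pointer fields distinguish "unset" from an explicit zero:

package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/api/types/swarm"
)

func main() {
	delay := 5 * time.Second
	attempts := uint64(3)
	policy := swarm.RestartPolicy{
		Condition:   swarm.RestartPolicyConditionOnFailure,
		Delay:       &delay,    // wait 5s between restarts
		MaxAttempts: &attempts, // give up after three attempts
		// Window is left nil: no evaluation window is configured,
		// which is distinct from an explicit zero duration.
	}
	fmt.Printf("%+v\n", policy)
}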

519
vendor/github.com/docker/docker/api/types/types.go generated vendored Normal file

@@ -0,0 +1,519 @@
package types
import (
"errors"
"fmt"
"io"
"os"
"strings"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/filters"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/api/types/registry"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/go-connections/nat"
)
// RootFS describes an image's root filesystem, including the layer IDs.
type RootFS struct {
Type string
Layers []string `json:",omitempty"`
BaseLayer string `json:",omitempty"`
}
// ImageInspect contains response of Engine API:
// GET "/images/{name:.*}/json"
type ImageInspect struct {
ID string `json:"Id"`
RepoTags []string
RepoDigests []string
Parent string
Comment string
Created string
Container string
ContainerConfig *container.Config
DockerVersion string
Author string
Config *container.Config
Architecture string
Os string
OsVersion string `json:",omitempty"`
Size int64
VirtualSize int64
GraphDriver GraphDriverData
RootFS RootFS
}
// Container contains response of Engine API:
// GET "/containers/json"
type Container struct {
ID string `json:"Id"`
Names []string
Image string
ImageID string
Command string
Created int64
Ports []Port
SizeRw int64 `json:",omitempty"`
SizeRootFs int64 `json:",omitempty"`
Labels map[string]string
State string
Status string
HostConfig struct {
NetworkMode string `json:",omitempty"`
}
NetworkSettings *SummaryNetworkSettings
Mounts []MountPoint
}
// CopyConfig contains request body of Engine API:
// POST "/containers/"+containerID+"/copy"
type CopyConfig struct {
Resource string
}
// ContainerPathStat is used to encode the header from
// GET "/containers/{name:.*}/archive"
// "Name" is the file or directory name.
type ContainerPathStat struct {
Name string `json:"name"`
Size int64 `json:"size"`
Mode os.FileMode `json:"mode"`
Mtime time.Time `json:"mtime"`
LinkTarget string `json:"linkTarget"`
}
// ContainerStats contains response of Engine API:
// GET "/stats"
type ContainerStats struct {
Body io.ReadCloser `json:"body"`
OSType string `json:"ostype"`
}
// Ping contains response of Engine API:
// GET "/_ping"
type Ping struct {
APIVersion string
Experimental bool
}
// Version contains response of Engine API:
// GET "/version"
type Version struct {
Version string
APIVersion string `json:"ApiVersion"`
MinAPIVersion string `json:"MinAPIVersion,omitempty"`
GitCommit string
GoVersion string
Os string
Arch string
KernelVersion string `json:",omitempty"`
Experimental bool `json:",omitempty"`
BuildTime string `json:",omitempty"`
}
// Commit holds the Git-commit (SHA1) that a binary was built from, as reported
// in the version-string of external tools, such as containerd or runC.
type Commit struct {
ID string // ID is the actual commit ID of external tool.
Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time.
}
// Info contains response of Engine API:
// GET "/info"
type Info struct {
ID string
Containers int
ContainersRunning int
ContainersPaused int
ContainersStopped int
Images int
Driver string
DriverStatus [][2]string
SystemStatus [][2]string
Plugins PluginsInfo
MemoryLimit bool
SwapLimit bool
KernelMemory bool
CPUCfsPeriod bool `json:"CpuCfsPeriod"`
CPUCfsQuota bool `json:"CpuCfsQuota"`
CPUShares bool
CPUSet bool
IPv4Forwarding bool
BridgeNfIptables bool
BridgeNfIP6tables bool `json:"BridgeNfIp6tables"`
Debug bool
NFd int
OomKillDisable bool
NGoroutines int
SystemTime string
LoggingDriver string
CgroupDriver string
NEventsListener int
KernelVersion string
OperatingSystem string
OSType string
Architecture string
IndexServerAddress string
RegistryConfig *registry.ServiceConfig
NCPU int
MemTotal int64
DockerRootDir string
HTTPProxy string `json:"HttpProxy"`
HTTPSProxy string `json:"HttpsProxy"`
NoProxy string
Name string
Labels []string
ExperimentalBuild bool
ServerVersion string
ClusterStore string
ClusterAdvertise string
Runtimes map[string]Runtime
DefaultRuntime string
Swarm swarm.Info
// LiveRestoreEnabled determines whether containers should be kept
// running when the daemon is shut down, or upon daemon start if
// running containers are detected
LiveRestoreEnabled bool
Isolation container.Isolation
InitBinary string
ContainerdCommit Commit
RuncCommit Commit
InitCommit Commit
SecurityOptions []string
}
// KeyValue holds a key/value pair
type KeyValue struct {
Key, Value string
}
// SecurityOpt contains the name and options of a security option
type SecurityOpt struct {
Name string
Options []KeyValue
}
// DecodeSecurityOptions decodes a security options string slice to a
// type-safe SecurityOpt
func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) {
so := []SecurityOpt{}
for _, opt := range opts {
// support output from a < 1.13 docker daemon
if !strings.Contains(opt, "=") {
so = append(so, SecurityOpt{Name: opt})
continue
}
secopt := SecurityOpt{}
split := strings.Split(opt, ",")
for _, s := range split {
kv := strings.SplitN(s, "=", 2)
if len(kv) != 2 {
return nil, fmt.Errorf("invalid security option %q", s)
}
if kv[0] == "" || kv[1] == "" {
return nil, errors.New("invalid empty security option")
}
if kv[0] == "name" {
secopt.Name = kv[1]
continue
}
secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]})
}
so = append(so, secopt)
}
return so, nil
}
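// Illustrative example (not part of the vendored file): given daemon output
// such as "name=seccomp,profile=default", the helper above yields
//
//	opts, _ := DecodeSecurityOptions([]string{"name=seccomp,profile=default"})
//	// opts[0].Name == "seccomp"
//	// opts[0].Options[0] == KeyValue{Key: "profile", Value: "default"}
//
// while a bare pre-1.13 value such as "apparmor" becomes
// SecurityOpt{Name: "apparmor"} with no options.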
// PluginsInfo is a temp struct holding the names of the plugins
// registered with the docker daemon. It is used by the Info struct
type PluginsInfo struct {
// List of Volume plugins registered
Volume []string
// List of Network plugins registered
Network []string
// List of Authorization plugins registered
Authorization []string
}
// ExecStartCheck is a temp struct used by execStart
// Its Config fields are part of ExecConfig in the runconfig package
type ExecStartCheck struct {
// ExecStart will first check if it's detached
Detach bool
// Check if there's a tty
Tty bool
}
// HealthcheckResult stores information about a single run of a healthcheck probe
type HealthcheckResult struct {
Start time.Time // Start is the time this check started
End time.Time // End is the time this check ended
ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe
Output string // Output from last check
}
// Health states
const (
NoHealthcheck = "none" // NoHealthcheck indicates there is no healthcheck
Starting = "starting" // Starting indicates that the container is not yet ready
Healthy = "healthy" // Healthy indicates that the container is running correctly
Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem
)
// Health stores information about the container's healthcheck results
type Health struct {
Status string // Status is one of Starting, Healthy or Unhealthy
FailingStreak int // FailingStreak is the number of consecutive failures
Log []*HealthcheckResult // Log contains the last few results (oldest first)
}
// ContainerState stores a container's running state.
// It's part of ContainerJSONBase and is returned by the "inspect" command
type ContainerState struct {
Status string
Running bool
Paused bool
Restarting bool
OOMKilled bool
Dead bool
Pid int
ExitCode int
Error string
StartedAt string
FinishedAt string
Health *Health `json:",omitempty"`
}
// ContainerNode stores information about the node that a container
// is running on. It's only available in Docker Swarm
type ContainerNode struct {
ID string
IPAddress string `json:"IP"`
Addr string
Name string
Cpus int
Memory int64
Labels map[string]string
}
// ContainerJSONBase contains response of Engine API:
// GET "/containers/{name:.*}/json"
type ContainerJSONBase struct {
ID string `json:"Id"`
Created string
Path string
Args []string
State *ContainerState
Image string
ResolvConfPath string
HostnamePath string
HostsPath string
LogPath string
Node *ContainerNode `json:",omitempty"`
Name string
RestartCount int
Driver string
MountLabel string
ProcessLabel string
AppArmorProfile string
ExecIDs []string
HostConfig *container.HostConfig
GraphDriver GraphDriverData
SizeRw *int64 `json:",omitempty"`
SizeRootFs *int64 `json:",omitempty"`
}
// ContainerJSON is the newer inspect response struct, used together with MountPoint
type ContainerJSON struct {
*ContainerJSONBase
Mounts []MountPoint
Config *container.Config
NetworkSettings *NetworkSettings
}
// NetworkSettings exposes the network settings in the API
type NetworkSettings struct {
NetworkSettingsBase
DefaultNetworkSettings
Networks map[string]*network.EndpointSettings
}
// SummaryNetworkSettings provides a summary of a container's networks
// in /containers/json
type SummaryNetworkSettings struct {
Networks map[string]*network.EndpointSettings
}
// NetworkSettingsBase holds basic information about networks
type NetworkSettingsBase struct {
Bridge string // Bridge is the Bridge name the network uses (e.g. `docker0`)
SandboxID string // SandboxID uniquely represents a container's network stack
HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface
LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix
LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address
Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port
SandboxKey string // SandboxKey identifies the sandbox
SecondaryIPAddresses []network.Address
SecondaryIPv6Addresses []network.Address
}
// DefaultNetworkSettings holds network information
// during the two-release deprecation period.
// It will be removed in Docker 1.11.
type DefaultNetworkSettings struct {
EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox
Gateway string // Gateway holds the gateway address for the network
GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address
GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address
IPAddress string // IPAddress holds the IPv4 address for the network
IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address
IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6
MacAddress string // MacAddress holds the MAC address for the network
}
// MountPoint represents a mount point configuration inside the container.
// This is used for reporting the mountpoints in use by a container.
type MountPoint struct {
Type mount.Type `json:",omitempty"`
Name string `json:",omitempty"`
Source string
Destination string
Driver string `json:",omitempty"`
Mode string
RW bool
Propagation mount.Propagation
}
// NetworkResource is the body of the "get network" http response message
type NetworkResource struct {
Name string // Name is the requested name of the network
ID string `json:"Id"` // ID uniquely identifies a network on a single machine
Created time.Time // Created is the time the network was created
Scope string // Scope describes the level at which the network exists (e.g. `global` for cluster-wide or `local` for machine level)
Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`)
EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6
IPAM network.IPAM // IPAM is the network's IP Address Management
Internal bool // Internal represents whether the network is internal-only
Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
Containers map[string]EndpointResource // Containers contains endpoints belonging to the network
Options map[string]string // Options holds the network specific options to use when creating the network
Labels map[string]string // Labels holds metadata specific to the network being created
Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network
}
// EndpointResource contains network resources allocated and used for a container in a network
type EndpointResource struct {
Name string
EndpointID string
MacAddress string
IPv4Address string
IPv6Address string
}
// NetworkCreate is the expected body of the "create network" http request message
type NetworkCreate struct {
CheckDuplicate bool
Driver string
EnableIPv6 bool
IPAM *network.IPAM
Internal bool
Attachable bool
Options map[string]string
Labels map[string]string
}
// NetworkCreateRequest is the request message sent to the server for network create call.
type NetworkCreateRequest struct {
NetworkCreate
Name string
}
// NetworkCreateResponse is the response message sent by the server for network create call
type NetworkCreateResponse struct {
ID string `json:"Id"`
Warning string
}
// NetworkConnect represents the data to be used to connect a container to the network
type NetworkConnect struct {
Container string
EndpointConfig *network.EndpointSettings `json:",omitempty"`
}
// NetworkDisconnect represents the data to be used to disconnect a container from the network
type NetworkDisconnect struct {
Container string
Force bool
}
// Checkpoint represents the details of a checkpoint
type Checkpoint struct {
Name string // Name is the name of the checkpoint
}
// Runtime describes an OCI runtime
type Runtime struct {
Path string `json:"path"`
Args []string `json:"runtimeArgs,omitempty"`
}
// DiskUsage contains response of Engine API:
// GET "/system/df"
type DiskUsage struct {
LayersSize int64
Images []*ImageSummary
Containers []*Container
Volumes []*Volume
}
// ContainersPruneReport contains the response for Engine API:
// POST "/containers/prune"
type ContainersPruneReport struct {
ContainersDeleted []string
SpaceReclaimed uint64
}
// VolumesPruneReport contains the response for Engine API:
// POST "/volumes/prune"
type VolumesPruneReport struct {
VolumesDeleted []string
SpaceReclaimed uint64
}
// ImagesPruneReport contains the response for Engine API:
// POST "/images/prune"
type ImagesPruneReport struct {
ImagesDeleted []ImageDeleteResponseItem
SpaceReclaimed uint64
}
// NetworksPruneReport contains the response for Engine API:
// POST "/networks/prune"
type NetworksPruneReport struct {
NetworksDeleted []string
}
// SecretCreateResponse contains the information returned to a client
// on the creation of a new secret.
type SecretCreateResponse struct {
// ID is the id of the created secret.
ID string
}
// SecretListOptions holds parameters to list secrets
type SecretListOptions struct {
Filters filters.Args
}
// PushResult contains the tag, manifest digest, and manifest size from the
// push. It's used to signal this information to the trust code in the client
// so it can sign the manifest if necessary.
type PushResult struct {
Tag string
Digest string
Size int
}


@@ -0,0 +1,14 @@
# Legacy API type versions
This package includes types for legacy API versions. The stable version of the API types lives in `api/types/*.go`.
Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for API version `1.20`, since versions below that will use the legacy types in `v1p19`.
## Package name conventions
The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks weirder: `v1_20.CallFunction`.
For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`, as in the hypothetical import below.
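For example, a hypothetical consumer that needs the pre-1.20 shape of a type would import the version-specific package:

```go
import "github.com/docker/docker/api/types/versions/v1p19"
```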


@@ -0,0 +1,62 @@
package versions
import (
"strconv"
"strings"
)
// compare compares two version strings
// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
func compare(v1, v2 string) int {
var (
currTab = strings.Split(v1, ".")
otherTab = strings.Split(v2, ".")
)
max := len(currTab)
if len(otherTab) > max {
max = len(otherTab)
}
for i := 0; i < max; i++ {
var currInt, otherInt int
if len(currTab) > i {
currInt, _ = strconv.Atoi(currTab[i])
}
if len(otherTab) > i {
otherInt, _ = strconv.Atoi(otherTab[i])
}
if currInt > otherInt {
return 1
}
if otherInt > currInt {
return -1
}
}
return 0
}
// LessThan checks if a version is less than another
func LessThan(v, other string) bool {
return compare(v, other) == -1
}
// LessThanOrEqualTo checks if a version is less than or equal to another
func LessThanOrEqualTo(v, other string) bool {
return compare(v, other) <= 0
}
// GreaterThan checks if a version is greater than another
func GreaterThan(v, other string) bool {
return compare(v, other) == 1
}
// GreaterThanOrEqualTo checks if a version is greater than or equal to another
func GreaterThanOrEqualTo(v, other string) bool {
return compare(v, other) >= 0
}
// Equal checks if a version is equal to another
func Equal(v, other string) bool {
return compare(v, other) == 0
}
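// exampleCompare is an illustrative sketch, not part of the upstream file:
// comparison is numeric per dot-separated segment, so "1.9" sorts before "1.10".
func exampleCompare() bool {
return LessThan("1.9", "1.10") && // true: 9 < 10 numerically
GreaterThanOrEqualTo("1.20", "1.20") // true: equal versions
}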

58
vendor/github.com/docker/docker/api/types/volume.go generated vendored Normal file

@@ -0,0 +1,58 @@
package types
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Volume volume
// swagger:model Volume
type Volume struct {
// Name of the volume driver used by the volume.
// Required: true
Driver string `json:"Driver"`
// User-defined key/value metadata.
// Required: true
Labels map[string]string `json:"Labels"`
// Mount path of the volume on the host.
// Required: true
Mountpoint string `json:"Mountpoint"`
// Name of the volume.
// Required: true
Name string `json:"Name"`
// The driver specific options used when creating the volume.
// Required: true
Options map[string]string `json:"Options"`
// The level at which the volume exists. Either `global` for cluster-wide, or `local` for machine level.
// Required: true
Scope string `json:"Scope"`
// Low-level details about the volume, provided by the volume driver.
// Details are returned as a map with key/value pairs:
// `{"key":"value","key2":"value2"}`.
//
// The `Status` field is optional, and is omitted if the volume driver
// does not support this feature.
//
Status map[string]interface{} `json:"Status,omitempty"`
// usage data
UsageData *VolumeUsageData `json:"UsageData,omitempty"`
}
// VolumeUsageData volume usage data
// swagger:model VolumeUsageData
type VolumeUsageData struct {
// The number of containers referencing this volume.
// Required: true
RefCount int64 `json:"RefCount"`
// The disk space used by the volume (local driver only)
// Required: true
Size int64 `json:"Size"`
}

46
vendor/github.com/docker/docker/opts/env.go generated vendored Normal file

@@ -0,0 +1,46 @@
package opts
import (
"fmt"
"os"
"runtime"
"strings"
)
// ValidateEnv validates an environment variable and returns it.
// If no value is specified, it returns the current value using os.Getenv.
//
// As with ParseEnvFile and related to #16585, environment variable names
// are not validated whatsoever; it's up to the application inside docker
// to validate them or not.
//
// The only validation here is to check if the name is empty, per #25099
func ValidateEnv(val string) (string, error) {
arr := strings.Split(val, "=")
if arr[0] == "" {
return "", fmt.Errorf("invalid environment variable: %s", val)
}
if len(arr) > 1 {
return val, nil
}
if !doesEnvExist(val) {
return val, nil
}
return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil
}
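// exampleValidateEnv is an illustrative sketch, not part of the upstream
// file. An explicit key=value pair is kept as-is; a bare name such as "HOME"
// is expanded from the current environment when it exists there.
func exampleValidateEnv() (string, error) {
if _, err := ValidateEnv("FOO=bar"); err != nil { // returns "FOO=bar" unchanged
return "", err
}
return ValidateEnv("HOME") // e.g. "HOME=/home/user" when HOME is set
}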
func doesEnvExist(name string) bool {
for _, entry := range os.Environ() {
parts := strings.SplitN(entry, "=", 2)
if runtime.GOOS == "windows" {
// Environment variables are case-insensitive on Windows. PaTh, path and PATH are equivalent.
if strings.EqualFold(parts[0], name) {
return true
}
}
if parts[0] == name {
return true
}
}
return false
}

165
vendor/github.com/docker/docker/opts/hosts.go generated vendored Normal file

@@ -0,0 +1,165 @@
package opts
import (
"fmt"
"net"
"net/url"
"strconv"
"strings"
)
var (
// DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. dockerd -H tcp://
// These are the IANA registered port numbers for use with Docker
// see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker
DefaultHTTPPort = 2375 // Default HTTP Port
// DefaultTLSHTTPPort Default HTTP Port used when TLS enabled
DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port
// DefaultUnixSocket Path for the unix socket.
// Docker daemon by default always listens on the default unix socket
DefaultUnixSocket = "/var/run/docker.sock"
// DefaultTCPHost constant defines the default host string used by docker on Windows
DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort)
// DefaultTLSHost constant defines the default host string used by docker for TLS sockets
DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort)
// DefaultNamedPipe defines the default named pipe used by docker on Windows
DefaultNamedPipe = `//./pipe/docker_engine`
)
// ValidateHost validates that the specified string is a valid host and returns it.
func ValidateHost(val string) (string, error) {
host := strings.TrimSpace(val)
// The empty string means default and is not handled by parseDockerDaemonHost
if host != "" {
_, err := parseDockerDaemonHost(host)
if err != nil {
return val, err
}
}
// Note: unlike most flag validators, we don't return the mutated value here;
// we need to know what the user entered later (using ParseHost) to adjust for TLS
return val, nil
}
// ParseHost parses the specified address and sets defaults for a daemon host string
func ParseHost(defaultToTLS bool, val string) (string, error) {
host := strings.TrimSpace(val)
if host == "" {
if defaultToTLS {
host = DefaultTLSHost
} else {
host = DefaultHost
}
} else {
var err error
host, err = parseDockerDaemonHost(host)
if err != nil {
return val, err
}
}
return host, nil
}
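// exampleParseHost is an illustrative sketch, not part of the upstream file,
// assuming the non-Windows defaults declared in this package.
func exampleParseHost() (string, error) {
// "" defaults to DefaultHost ("unix:///var/run/docker.sock" on non-Windows);
// with defaultToTLS=true an empty value would resolve to DefaultTLSHost.
return ParseHost(false, "")
}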
// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host.
// Depending on the address specified, this may return one of the global Default* strings defined in hosts.go.
func parseDockerDaemonHost(addr string) (string, error) {
addrParts := strings.SplitN(addr, "://", 2)
if len(addrParts) == 1 && addrParts[0] != "" {
addrParts = []string{"tcp", addrParts[0]}
}
switch addrParts[0] {
case "tcp":
return ParseTCPAddr(addrParts[1], DefaultTCPHost)
case "unix":
return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket)
case "npipe":
return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
case "fd":
return addr, nil
default:
return "", fmt.Errorf("Invalid bind address format: %s", addr)
}
}
// parseSimpleProtoAddr parses and validates that the specified address is a valid
// socket address for simple protocols like unix and npipe. It returns a formatted
// socket address, either using the address parsed from addr, or the contents of
// defaultAddr if addr is a blank string.
func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) {
addr = strings.TrimPrefix(addr, proto+"://")
if strings.Contains(addr, "://") {
return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr)
}
if addr == "" {
addr = defaultAddr
}
return fmt.Sprintf("%s://%s", proto, addr), nil
}
// ParseTCPAddr parses and validates that the specified address is a valid TCP
// address. It returns a formatted TCP address, either using the address parsed
// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string.
// tryAddr is expected to have already been Trim()'d
// defaultAddr must be in the full `tcp://host:port` form
func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) {
if tryAddr == "" || tryAddr == "tcp://" {
return defaultAddr, nil
}
addr := strings.TrimPrefix(tryAddr, "tcp://")
if strings.Contains(addr, "://") || addr == "" {
return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr)
}
defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://")
defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr)
if err != nil {
return "", err
}
// url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but
// not 1.4. See https://github.com/golang/go/issues/12200 and
// https://github.com/golang/go/issues/6530.
if strings.HasSuffix(addr, "]:") {
addr += defaultPort
}
u, err := url.Parse("tcp://" + addr)
if err != nil {
return "", err
}
host, port, err := net.SplitHostPort(u.Host)
if err != nil {
// try port addition once
host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort))
}
if err != nil {
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
}
if host == "" {
host = defaultHost
}
if port == "" {
port = defaultPort
}
p, err := strconv.Atoi(port)
if err != nil && p == 0 {
return "", fmt.Errorf("Invalid bind address format: %s", tryAddr)
}
return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil
}
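// exampleParseTCPAddr is an illustrative sketch, not part of the upstream
// file, showing how missing host or port pieces are filled from defaultAddr.
func exampleParseTCPAddr() {
addr1, _ := ParseTCPAddr("tcp://:8080", DefaultTCPHost) // host from default: "tcp://<DefaultHTTPHost>:8080"
addr2, _ := ParseTCPAddr("10.0.0.1", DefaultTCPHost) // port from default: "tcp://10.0.0.1:2375"
_, _ = addr1, addr2
}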
// ValidateExtraHost validates that the specified string is a valid extrahost and returns it.
// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6).
func ValidateExtraHost(val string) (string, error) {
// allow for IPv6 addresses in extra hosts by only splitting on first ":"
arr := strings.SplitN(val, ":", 2)
if len(arr) != 2 || len(arr[0]) == 0 {
return "", fmt.Errorf("bad format for add-host: %q", val)
}
if _, err := ValidateIPAddress(arr[1]); err != nil {
return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1])
}
return val, nil
}
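// exampleValidateExtraHost is an illustrative sketch, not part of the
// upstream file: only the first ":" separates name from IP, so IPv6 works.
func exampleValidateExtraHost() (string, error) {
if _, err := ValidateExtraHost("myhost:10.0.0.5"); err != nil { // IPv4
return "", err
}
return ValidateExtraHost("myhost:::1") // splits into "myhost" and "::1"
}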

8
vendor/github.com/docker/docker/opts/hosts_unix.go generated vendored Normal file

@@ -0,0 +1,8 @@
// +build !windows
package opts
import "fmt"
// DefaultHost constant defines the default host string used by docker on platforms other than Windows
var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket)


@@ -0,0 +1,6 @@
// +build windows
package opts
// DefaultHost constant defines the default host string used by docker on Windows
var DefaultHost = "npipe://" + DefaultNamedPipe

47
vendor/github.com/docker/docker/opts/ip.go generated vendored Normal file

@@ -0,0 +1,47 @@
package opts
import (
"fmt"
"net"
)
// IPOpt holds an IP. It is used to store values from CLI flags.
type IPOpt struct {
*net.IP
}
// NewIPOpt creates a new IPOpt from a reference net.IP and a
// string representation of an IP. If the string is not a valid
// IP it will fall back to the specified reference.
func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt {
o := &IPOpt{
IP: ref,
}
o.Set(defaultVal)
return o
}
// Set sets an IPv4 or IPv6 address from a given string. If the given
// string is not parseable as an IP address it returns an error.
func (o *IPOpt) Set(val string) error {
ip := net.ParseIP(val)
if ip == nil {
return fmt.Errorf("%s is not an ip address", val)
}
*o.IP = ip
return nil
}
// String returns the IP address stored in the IPOpt. If the stored IP is
// nil, it returns an empty string.
func (o *IPOpt) String() string {
if *o.IP == nil {
return ""
}
return o.IP.String()
}
// Type returns the type of the option
func (o *IPOpt) Type() string {
return "ip"
}

171
vendor/github.com/docker/docker/opts/mount.go generated vendored Normal file
View File

@@ -0,0 +1,171 @@
package opts
import (
"encoding/csv"
"fmt"
"os"
"strconv"
"strings"
mounttypes "github.com/docker/docker/api/types/mount"
"github.com/docker/go-units"
)
// MountOpt is a Value type for parsing mounts
type MountOpt struct {
values []mounttypes.Mount
}
// Set a new mount value
func (m *MountOpt) Set(value string) error {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
mount := mounttypes.Mount{}
volumeOptions := func() *mounttypes.VolumeOptions {
if mount.VolumeOptions == nil {
mount.VolumeOptions = &mounttypes.VolumeOptions{
Labels: make(map[string]string),
}
}
if mount.VolumeOptions.DriverConfig == nil {
mount.VolumeOptions.DriverConfig = &mounttypes.Driver{}
}
return mount.VolumeOptions
}
bindOptions := func() *mounttypes.BindOptions {
if mount.BindOptions == nil {
mount.BindOptions = new(mounttypes.BindOptions)
}
return mount.BindOptions
}
tmpfsOptions := func() *mounttypes.TmpfsOptions {
if mount.TmpfsOptions == nil {
mount.TmpfsOptions = new(mounttypes.TmpfsOptions)
}
return mount.TmpfsOptions
}
setValueOnMap := func(target map[string]string, value string) {
parts := strings.SplitN(value, "=", 2)
if len(parts) == 1 {
target[value] = ""
} else {
target[parts[0]] = parts[1]
}
}
mount.Type = mounttypes.TypeVolume // default to volume mounts
// Set writable as the default
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
key := strings.ToLower(parts[0])
if len(parts) == 1 {
switch key {
case "readonly", "ro":
mount.ReadOnly = true
continue
case "volume-nocopy":
volumeOptions().NoCopy = true
continue
}
}
if len(parts) != 2 {
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
}
value := parts[1]
switch key {
case "type":
mount.Type = mounttypes.Type(strings.ToLower(value))
case "source", "src":
mount.Source = value
case "target", "dst", "destination":
mount.Target = value
case "readonly", "ro":
mount.ReadOnly, err = strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invalid value for %s: %s", key, value)
}
case "bind-propagation":
bindOptions().Propagation = mounttypes.Propagation(strings.ToLower(value))
case "volume-nocopy":
volumeOptions().NoCopy, err = strconv.ParseBool(value)
if err != nil {
return fmt.Errorf("invalid value for populate: %s", value)
}
case "volume-label":
setValueOnMap(volumeOptions().Labels, value)
case "volume-driver":
volumeOptions().DriverConfig.Name = value
case "volume-opt":
if volumeOptions().DriverConfig.Options == nil {
volumeOptions().DriverConfig.Options = make(map[string]string)
}
setValueOnMap(volumeOptions().DriverConfig.Options, value)
case "tmpfs-size":
sizeBytes, err := units.RAMInBytes(value)
if err != nil {
return fmt.Errorf("invalid value for %s: %s", key, value)
}
tmpfsOptions().SizeBytes = sizeBytes
case "tmpfs-mode":
ui64, err := strconv.ParseUint(value, 8, 32)
if err != nil {
return fmt.Errorf("invalid value for %s: %s", key, value)
}
tmpfsOptions().Mode = os.FileMode(ui64)
default:
return fmt.Errorf("unexpected key '%s' in '%s'", key, field)
}
}
if mount.Type == "" {
return fmt.Errorf("type is required")
}
if mount.Target == "" {
return fmt.Errorf("target is required")
}
if mount.VolumeOptions != nil && mount.Type != mounttypes.TypeVolume {
return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", mount.Type)
}
if mount.BindOptions != nil && mount.Type != mounttypes.TypeBind {
return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", mount.Type)
}
if mount.TmpfsOptions != nil && mount.Type != mounttypes.TypeTmpfs {
return fmt.Errorf("cannot mix 'tmpfs-*' options with mount type '%s'", mount.Type)
}
m.values = append(m.values, mount)
return nil
}
// Type returns the type of this option
func (m *MountOpt) Type() string {
return "mount"
}
// String returns a string repr of this option
func (m *MountOpt) String() string {
mounts := []string{}
for _, mount := range m.values {
repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target)
mounts = append(mounts, repr)
}
return strings.Join(mounts, ", ")
}
// Value returns the mounts
func (m *MountOpt) Value() []mounttypes.Mount {
return m.values
}
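// exampleMountOpt is an illustrative sketch, not part of the upstream file,
// parsing one hypothetical --mount flag value in the CSV syntax handled by Set.
func exampleMountOpt() ([]mounttypes.Mount, error) {
var m MountOpt
// a read-only volume mount carrying one volume label
if err := m.Set("type=volume,source=data,target=/data,volume-label=env=dev,readonly"); err != nil {
return nil, err
}
return m.Value(), nil // one Mount with ReadOnly=true and Labels["env"]=="dev"
}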

446
vendor/github.com/docker/docker/opts/opts.go generated vendored Normal file

@@ -0,0 +1,446 @@
package opts
import (
"fmt"
"math/big"
"net"
"path"
"regexp"
"strings"
"github.com/docker/docker/api/types/filters"
units "github.com/docker/go-units"
)
var (
alphaRegexp = regexp.MustCompile(`[a-zA-Z]`)
domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`)
)
// ListOpts holds a list of values and a validation function.
type ListOpts struct {
values *[]string
validator ValidatorFctType
}
// NewListOpts creates a new ListOpts with the specified validator.
func NewListOpts(validator ValidatorFctType) ListOpts {
var values []string
return *NewListOptsRef(&values, validator)
}
// NewListOptsRef creates a new ListOpts with the specified values and validator.
func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts {
return &ListOpts{
values: values,
validator: validator,
}
}
func (opts *ListOpts) String() string {
return fmt.Sprintf("%v", []string((*opts.values)))
}
// Set validates the input value if a validator is set, then appends it
// to the internal slice.
func (opts *ListOpts) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil {
return err
}
value = v
}
(*opts.values) = append((*opts.values), value)
return nil
}
// Delete removes the specified element from the slice.
func (opts *ListOpts) Delete(key string) {
for i, k := range *opts.values {
if k == key {
(*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...)
return
}
}
}
// GetMap returns the content of values in a map in order to avoid
// duplicates.
func (opts *ListOpts) GetMap() map[string]struct{} {
ret := make(map[string]struct{})
for _, k := range *opts.values {
ret[k] = struct{}{}
}
return ret
}
// GetAll returns the values of the slice.
func (opts *ListOpts) GetAll() []string {
return (*opts.values)
}
// GetAllOrEmpty returns the values of the slice
// or an empty slice when there are no values.
func (opts *ListOpts) GetAllOrEmpty() []string {
v := *opts.values
if v == nil {
return make([]string, 0)
}
return v
}
// Get checks the existence of the specified key.
func (opts *ListOpts) Get(key string) bool {
for _, k := range *opts.values {
if k == key {
return true
}
}
return false
}
// Len returns the number of elements in the slice.
func (opts *ListOpts) Len() int {
return len((*opts.values))
}
// Type returns a string name for this Option type
func (opts *ListOpts) Type() string {
return "list"
}
// WithValidator returns the ListOpts with validator set.
func (opts *ListOpts) WithValidator(validator ValidatorFctType) *ListOpts {
opts.validator = validator
return opts
}
// NamedOption is an interface that list and map options
// with names implement.
type NamedOption interface {
Name() string
}
// NamedListOpts is a ListOpts with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedListOpts struct {
name string
ListOpts
}
var _ NamedOption = &NamedListOpts{}
// NewNamedListOptsRef creates a reference to a new NamedListOpts struct.
func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts {
return &NamedListOpts{
name: name,
ListOpts: *NewListOptsRef(values, validator),
}
}
// Name returns the name of the NamedListOpts in the configuration.
func (o *NamedListOpts) Name() string {
return o.name
}
// MapOpts holds a map of values and a validation function.
type MapOpts struct {
values map[string]string
validator ValidatorFctType
}
// Set validates the input value if a validator is set, and adds it to the
// internal map by splitting on '='.
func (opts *MapOpts) Set(value string) error {
if opts.validator != nil {
v, err := opts.validator(value)
if err != nil {
return err
}
value = v
}
vals := strings.SplitN(value, "=", 2)
if len(vals) == 1 {
(opts.values)[vals[0]] = ""
} else {
(opts.values)[vals[0]] = vals[1]
}
return nil
}
// GetAll returns the values of MapOpts as a map.
func (opts *MapOpts) GetAll() map[string]string {
return opts.values
}
func (opts *MapOpts) String() string {
return fmt.Sprintf("%v", map[string]string((opts.values)))
}
// Type returns a string name for this Option type
func (opts *MapOpts) Type() string {
return "map"
}
// NewMapOpts creates a new MapOpts with the specified map of values and a validator.
func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts {
if values == nil {
values = make(map[string]string)
}
return &MapOpts{
values: values,
validator: validator,
}
}
// NamedMapOpts is a MapOpts struct with a configuration name.
// This struct is useful to keep reference to the assigned
// field name in the internal configuration struct.
type NamedMapOpts struct {
name string
MapOpts
}
var _ NamedOption = &NamedMapOpts{}
// NewNamedMapOpts creates a reference to a new NamedMapOpts struct.
func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts {
return &NamedMapOpts{
name: name,
MapOpts: *NewMapOpts(values, validator),
}
}
// Name returns the name of the NamedMapOpts in the configuration.
func (o *NamedMapOpts) Name() string {
return o.name
}
// ValidatorFctType defines a validator function that returns a validated string and/or an error.
type ValidatorFctType func(val string) (string, error)
// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error
type ValidatorFctListType func(val string) ([]string, error)
// ValidateIPAddress validates an IP address.
func ValidateIPAddress(val string) (string, error) {
var ip = net.ParseIP(strings.TrimSpace(val))
if ip != nil {
return ip.String(), nil
}
return "", fmt.Errorf("%s is not an ip address", val)
}
// ValidateMACAddress validates a MAC address.
func ValidateMACAddress(val string) (string, error) {
_, err := net.ParseMAC(strings.TrimSpace(val))
if err != nil {
return "", err
}
return val, nil
}
// ValidateDNSSearch validates domain for resolvconf search configuration.
// A zero length domain is represented by a dot (.).
func ValidateDNSSearch(val string) (string, error) {
if val = strings.Trim(val, " "); val == "." {
return val, nil
}
return validateDomain(val)
}
func validateDomain(val string) (string, error) {
if alphaRegexp.FindString(val) == "" {
return "", fmt.Errorf("%s is not a valid domain", val)
}
ns := domainRegexp.FindSubmatch([]byte(val))
if len(ns) > 0 && len(ns[1]) < 255 {
return string(ns[1]), nil
}
return "", fmt.Errorf("%s is not a valid domain", val)
}
// ValidateLabel validates that the specified string is a valid label, and returns it.
// Labels are in the form of key=value.
func ValidateLabel(val string) (string, error) {
if strings.Count(val, "=") < 1 {
return "", fmt.Errorf("bad attribute format: %s", val)
}
return val, nil
}
// ValidateSysctl validates a sysctl and returns it.
func ValidateSysctl(val string) (string, error) {
validSysctlMap := map[string]bool{
"kernel.msgmax": true,
"kernel.msgmnb": true,
"kernel.msgmni": true,
"kernel.sem": true,
"kernel.shmall": true,
"kernel.shmmax": true,
"kernel.shmmni": true,
"kernel.shm_rmid_forced": true,
}
validSysctlPrefixes := []string{
"net.",
"fs.mqueue.",
}
arr := strings.Split(val, "=")
if len(arr) < 2 {
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
if validSysctlMap[arr[0]] {
return val, nil
}
for _, vp := range validSysctlPrefixes {
if strings.HasPrefix(arr[0], vp) {
return val, nil
}
}
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}
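// exampleValidateSysctl is an illustrative sketch, not part of the upstream
// file: the "net." and "fs.mqueue." prefixes are whitelisted, and other keys
// are only accepted if they appear in validSysctlMap.
func exampleValidateSysctl() error {
if _, err := ValidateSysctl("net.ipv4.ip_forward=1"); err != nil { // allowed via "net." prefix
return err
}
_, err := ValidateSysctl("vm.overcommit_memory=1") // rejected: not whitelisted
return err
}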
// FilterOpt is a flag type for validating filters
type FilterOpt struct {
filter filters.Args
}
// NewFilterOpt returns a new FilterOpt
func NewFilterOpt() FilterOpt {
return FilterOpt{filter: filters.NewArgs()}
}
func (o *FilterOpt) String() string {
repr, err := filters.ToParam(o.filter)
if err != nil {
return "invalid filters"
}
return repr
}
// Set sets the value of the opt by parsing the command line value
func (o *FilterOpt) Set(value string) error {
var err error
o.filter, err = filters.ParseFlag(value, o.filter)
return err
}
// Type returns the option type
func (o *FilterOpt) Type() string {
return "filter"
}
// Value returns the value of this option
func (o *FilterOpt) Value() filters.Args {
return o.filter
}
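// exampleFilterOpt is an illustrative sketch, not part of the upstream file;
// each Set call parses one key=value filter into the underlying filters.Args.
func exampleFilterOpt() (filters.Args, error) {
f := NewFilterOpt()
if err := f.Set("dangling=true"); err != nil {
return f.Value(), err
}
err := f.Set("label=env=dev")
return f.Value(), err
}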
// NanoCPUs is a type for fixed point fractional number.
type NanoCPUs int64
// String returns the string format of the number
func (c *NanoCPUs) String() string {
return big.NewRat(c.Value(), 1e9).FloatString(3)
}
// Set sets the value of the NanoCPU by passing a string
func (c *NanoCPUs) Set(value string) error {
cpus, err := ParseCPUs(value)
*c = NanoCPUs(cpus)
return err
}
// Type returns the type
func (c *NanoCPUs) Type() string {
return "decimal"
}
// Value returns the value in int64
func (c *NanoCPUs) Value() int64 {
return int64(*c)
}
// ParseCPUs takes a string ratio and returns an integer value of nano cpus
func ParseCPUs(value string) (int64, error) {
cpu, ok := new(big.Rat).SetString(value)
if !ok {
return 0, fmt.Errorf("failed to parse %v as a rational number", value)
}
nano := cpu.Mul(cpu, big.NewRat(1e9, 1))
if !nano.IsInt() {
return 0, fmt.Errorf("value is too precise")
}
return nano.Num().Int64(), nil
}
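// exampleParseCPUs is an illustrative sketch, not part of the upstream file:
// fractional CPU counts become integer nano-CPU values.
func exampleParseCPUs() (int64, error) {
return ParseCPUs("0.5") // 0.5 * 1e9 = 500000000 nano-CPUs
}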
// ParseLink parses and validates the specified string as a link format (name:alias)
func ParseLink(val string) (string, string, error) {
if val == "" {
return "", "", fmt.Errorf("empty string specified for links")
}
arr := strings.Split(val, ":")
if len(arr) > 2 {
return "", "", fmt.Errorf("bad format for links: %s", val)
}
if len(arr) == 1 {
return val, val, nil
}
// This is kept because we can actually get a HostConfig with links
// from an already created container and the format is not `foo:bar`
// but `/foo:/c1/bar`
if strings.HasPrefix(arr[0], "/") {
_, alias := path.Split(arr[1])
return arr[0][1:], alias, nil
}
return arr[0], arr[1], nil
}
// ValidateLink validates that the specified string has a valid link format (containerName:alias).
func ValidateLink(val string) (string, error) {
_, _, err := ParseLink(val)
return val, err
}
// MemBytes is a type for human readable memory bytes (like 128M, 2g, etc)
type MemBytes int64
// String returns the string format of the human readable memory bytes
func (m *MemBytes) String() string {
// NOTE: In spf13/pflag/flag.go, "0" is considered as "zero value" while "0 B" is not.
// We return "0" in case value is 0 here so that the default value is hidden.
// (Sometimes "default 0 B" is actually misleading)
if m.Value() != 0 {
return units.BytesSize(float64(m.Value()))
}
return "0"
}
// Set sets the value of the MemBytes by passing a string
func (m *MemBytes) Set(value string) error {
val, err := units.RAMInBytes(value)
*m = MemBytes(val)
return err
}
// Type returns the type
func (m *MemBytes) Type() string {
return "bytes"
}
// Value returns the value in int64
func (m *MemBytes) Value() int64 {
return int64(*m)
}
// UnmarshalJSON is the customized unmarshaler for MemBytes
func (m *MemBytes) UnmarshalJSON(s []byte) error {
if len(s) <= 2 || s[0] != '"' || s[len(s)-1] != '"' {
return fmt.Errorf("invalid size: %q", s)
}
val, err := units.RAMInBytes(string(s[1 : len(s)-1]))
*m = MemBytes(val)
return err
}
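// exampleMemBytes is an illustrative sketch, not part of the upstream file:
// human-readable sizes parse to bytes using binary (1024-based) multiples.
func exampleMemBytes() (int64, error) {
var m MemBytes
err := m.Set("128m") // 128 * 1024 * 1024 = 134217728
return m.Value(), err
}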

6
vendor/github.com/docker/docker/opts/opts_unix.go generated vendored Normal file

@@ -0,0 +1,6 @@
// +build !windows
package opts
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
const DefaultHTTPHost = "localhost"

56
vendor/github.com/docker/docker/opts/opts_windows.go generated vendored Normal file

@@ -0,0 +1,56 @@
package opts
// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5.
// @jhowardmsft, @swernli.
//
// On Windows, this mitigates a problem with the default options of running
// a docker client against a local docker daemon on TP5.
//
// What was found is that if the default host is "localhost", even if the client
// (and daemon as this is local) is not physically on a network, and the DNS
// cache is flushed (ipconfig /flushdns), then the client will pause for
// exactly one second when connecting to the daemon for calls. For example
// using docker run windowsservercore cmd, the CLI will send a create followed
// by an attach. You see the delay between the attach finishing and the attach
// being seen by the daemon.
//
// Here's some daemon debug logs with additional debug spew put in. The
// AfterWriteJSON log is the very last thing the daemon does as part of the
// create call. The POST /attach is the second CLI call. Notice the second
// time gap.
//
// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs"
// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig"
// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...."
// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking....
// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...."
// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...."
// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func"
// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create"
// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2"
// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate"
// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON"
// ... 1 second gap here....
// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach"
// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1"
//
// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change
// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. In theory,
// the Windows networking stack is supposed to resolve "localhost" internally,
// without hitting DNS, or even reading the hosts file (which is why localhost
// is commented out in the hosts file on Windows).
//
// We have validated that working around this using the actual IPv4 localhost
// address does not cause the delay.
//
// This does not occur with the docker client built with 1.4.3 on the same
// Windows build, regardless of whether the daemon is built using 1.5.1
// or 1.4.3. It does not occur on Linux. We also verified we see the same thing
// on a cross-compiled Windows binary (from Linux).
//
// Final note: This is a mitigation, not a 'real' fix. It is still susceptible
// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...'
// explicitly.
// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. dockerd -H tcp://:8080
const DefaultHTTPHost = "127.0.0.1"

162
vendor/github.com/docker/docker/opts/port.go generated vendored Normal file

@@ -0,0 +1,162 @@
package opts
import (
"encoding/csv"
"fmt"
"regexp"
"strconv"
"strings"
"github.com/docker/docker/api/types/swarm"
"github.com/docker/go-connections/nat"
)
const (
portOptTargetPort = "target"
portOptPublishedPort = "published"
portOptProtocol = "protocol"
portOptMode = "mode"
)
// PortOpt represents a port config in swarm mode.
type PortOpt struct {
ports []swarm.PortConfig
}
// Set a new port value
func (p *PortOpt) Set(value string) error {
longSyntax, err := regexp.MatchString(`\w+=\w+(,\w+=\w+)*`, value)
if err != nil {
return err
}
if longSyntax {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
pConfig := swarm.PortConfig{}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
return fmt.Errorf("invalid field %s", field)
}
key := strings.ToLower(parts[0])
value := strings.ToLower(parts[1])
switch key {
case portOptProtocol:
if value != string(swarm.PortConfigProtocolTCP) && value != string(swarm.PortConfigProtocolUDP) {
return fmt.Errorf("invalid protocol value %s", value)
}
pConfig.Protocol = swarm.PortConfigProtocol(value)
case portOptMode:
if value != string(swarm.PortConfigPublishModeIngress) && value != string(swarm.PortConfigPublishModeHost) {
return fmt.Errorf("invalid publish mode value %s", value)
}
pConfig.PublishMode = swarm.PortConfigPublishMode(value)
case portOptTargetPort:
tPort, err := strconv.ParseUint(value, 10, 16)
if err != nil {
return err
}
pConfig.TargetPort = uint32(tPort)
case portOptPublishedPort:
pPort, err := strconv.ParseUint(value, 10, 16)
if err != nil {
return err
}
pConfig.PublishedPort = uint32(pPort)
default:
return fmt.Errorf("invalid field key %s", key)
}
}
if pConfig.TargetPort == 0 {
return fmt.Errorf("missing mandatory field %q", portOptTargetPort)
}
if pConfig.PublishMode == "" {
pConfig.PublishMode = swarm.PortConfigPublishModeIngress
}
if pConfig.Protocol == "" {
pConfig.Protocol = swarm.PortConfigProtocolTCP
}
p.ports = append(p.ports, pConfig)
} else {
// short syntax
portConfigs := []swarm.PortConfig{}
ports, portBindingMap, err := nat.ParsePortSpecs([]string{value})
if err != nil {
return err
}
for _, portBindings := range portBindingMap {
for _, portBinding := range portBindings {
if portBinding.HostIP != "" {
return fmt.Errorf("HostIP is not supported.")
}
}
}
for port := range ports {
portConfig, err := ConvertPortToPortConfig(port, portBindingMap)
if err != nil {
return err
}
portConfigs = append(portConfigs, portConfig...)
}
p.ports = append(p.ports, portConfigs...)
}
return nil
}
// Type returns the type of this option
func (p *PortOpt) Type() string {
return "port"
}
// String returns a string repr of this option
func (p *PortOpt) String() string {
ports := []string{}
for _, port := range p.ports {
repr := fmt.Sprintf("%v:%v/%s/%s", port.PublishedPort, port.TargetPort, port.Protocol, port.PublishMode)
ports = append(ports, repr)
}
return strings.Join(ports, ", ")
}
// Value returns the ports
func (p *PortOpt) Value() []swarm.PortConfig {
return p.ports
}
// ConvertPortToPortConfig converts ports to the swarm type
func ConvertPortToPortConfig(
port nat.Port,
portBindings map[nat.Port][]nat.PortBinding,
) ([]swarm.PortConfig, error) {
ports := []swarm.PortConfig{}
for _, binding := range portBindings[port] {
hostPort, err := strconv.ParseUint(binding.HostPort, 10, 16)
if err != nil && binding.HostPort != "" {
return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
}
ports = append(ports, swarm.PortConfig{
//TODO Name: ?
Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())),
TargetPort: uint32(port.Int()),
PublishedPort: uint32(hostPort),
PublishMode: swarm.PortConfigPublishModeIngress,
})
}
return ports, nil
}
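// examplePortOpt is an illustrative sketch, not part of the upstream file:
// the long and short syntaxes below yield an equivalent PortConfig.
func examplePortOpt() ([]swarm.PortConfig, error) {
var p PortOpt
if err := p.Set("target=80,published=8080,protocol=tcp,mode=ingress"); err != nil {
return nil, err
}
if err := p.Set("8080:80/tcp"); err != nil { // short syntax, same result
return nil, err
}
return p.Value(), nil
}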

37
vendor/github.com/docker/docker/opts/quotedstring.go generated vendored Normal file

@@ -0,0 +1,37 @@
package opts
// QuotedString is a string that may have extra quotes around the value. The
// quotes are stripped from the value.
type QuotedString struct {
value *string
}
// Set sets a new value
func (s *QuotedString) Set(val string) error {
*s.value = trimQuotes(val)
return nil
}
// Type returns the type of the value
func (s *QuotedString) Type() string {
return "string"
}
func (s *QuotedString) String() string {
return string(*s.value)
}
func trimQuotes(value string) string {
lastIndex := len(value) - 1
// guard against empty and single-character strings, which cannot carry a
// matching pair of surrounding quotes (indexing them would panic)
if lastIndex < 1 {
return value
}
for _, char := range []byte{'\'', '"'} {
if value[0] == char && value[lastIndex] == char {
return value[1:lastIndex]
}
}
return value
}
// NewQuotedString returns a new quoted string option
func NewQuotedString(value *string) *QuotedString {
return &QuotedString{value: value}
}
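// exampleQuotedString is an illustrative sketch, not part of the upstream
// file: a matching pair of surrounding quotes is stripped, anything else kept.
func exampleQuotedString() string {
v := ""
s := NewQuotedString(&v)
_ = s.Set(`"secret"`) // Set never fails; v is now "secret"
return v
}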

79
vendor/github.com/docker/docker/opts/runtime.go generated vendored Normal file

@@ -0,0 +1,79 @@
package opts
import (
"fmt"
"strings"
"github.com/docker/docker/api/types"
)
// RuntimeOpt defines a map of Runtimes
type RuntimeOpt struct {
name string
stockRuntimeName string
values *map[string]types.Runtime
}
// NewNamedRuntimeOpt creates a new RuntimeOpt
func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt {
if ref == nil {
ref = &map[string]types.Runtime{}
}
return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime}
}
// Name returns the name of the RuntimeOpt in the configuration.
func (o *RuntimeOpt) Name() string {
return o.name
}
// Set validates and updates the list of Runtimes
func (o *RuntimeOpt) Set(val string) error {
parts := strings.SplitN(val, "=", 2)
if len(parts) != 2 {
return fmt.Errorf("invalid runtime argument: %s", val)
}
parts[0] = strings.TrimSpace(parts[0])
parts[1] = strings.TrimSpace(parts[1])
if parts[0] == "" || parts[1] == "" {
return fmt.Errorf("invalid runtime argument: %s", val)
}
parts[0] = strings.ToLower(parts[0])
if parts[0] == o.stockRuntimeName {
return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName)
}
if _, ok := (*o.values)[parts[0]]; ok {
return fmt.Errorf("runtime '%s' was already defined", parts[0])
}
(*o.values)[parts[0]] = types.Runtime{Path: parts[1]}
return nil
}
// String returns Runtime values as a string.
func (o *RuntimeOpt) String() string {
var out []string
for k := range *o.values {
out = append(out, k)
}
return fmt.Sprintf("%v", out)
}
// GetMap returns a map of Runtimes (name: path)
func (o *RuntimeOpt) GetMap() map[string]types.Runtime {
if o.values != nil {
return *o.values
}
return map[string]types.Runtime{}
}
// Type returns the type of the option
func (o *RuntimeOpt) Type() string {
return "runtime"
}
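// exampleRuntimeOpt is an illustrative sketch, not part of the upstream
// file; "runc" stands in here for the configured stock runtime name.
func exampleRuntimeOpt() error {
o := NewNamedRuntimeOpt("runtimes", nil, "runc")
if err := o.Set("custom=/usr/local/bin/custom-runc"); err != nil {
return err
}
return o.Set("runc=/usr/bin/runc") // error: the stock runtime name is reserved
}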

107
vendor/github.com/docker/docker/opts/secret.go generated vendored Normal file

@@ -0,0 +1,107 @@
package opts
import (
"encoding/csv"
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/docker/docker/api/types"
)
// SecretOpt is a Value type for parsing secrets
type SecretOpt struct {
values []*types.SecretRequestOption
}
// Set a new secret value
func (o *SecretOpt) Set(value string) error {
csvReader := csv.NewReader(strings.NewReader(value))
fields, err := csvReader.Read()
if err != nil {
return err
}
options := &types.SecretRequestOption{
Source: "",
Target: "",
UID: "0",
GID: "0",
Mode: 0444,
}
// support a simple syntax of --secret foo
if len(fields) == 1 {
options.Source = fields[0]
options.Target = fields[0]
o.values = append(o.values, options)
return nil
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
key := strings.ToLower(parts[0])
if len(parts) != 2 {
return fmt.Errorf("invalid field '%s' must be a key=value pair", field)
}
value := parts[1]
switch key {
case "source", "src":
options.Source = value
case "target":
tDir, _ := filepath.Split(value)
if tDir != "" {
return fmt.Errorf("target must not be a path")
}
options.Target = value
case "uid":
options.UID = value
case "gid":
options.GID = value
case "mode":
m, err := strconv.ParseUint(value, 0, 32)
if err != nil {
return fmt.Errorf("invalid mode specified: %v", err)
}
options.Mode = os.FileMode(m)
default:
if len(fields) != 1 || value != "" {
return fmt.Errorf("invalid field in secret request: %s", key)
}
}
}
if options.Source == "" {
return fmt.Errorf("source is required")
}
o.values = append(o.values, options)
return nil
}
// Type returns the type of this option
func (o *SecretOpt) Type() string {
return "secret"
}
// String returns a string repr of this option
func (o *SecretOpt) String() string {
secrets := []string{}
for _, secret := range o.values {
repr := fmt.Sprintf("%s -> %s", secret.Source, secret.Target)
secrets = append(secrets, repr)
}
return strings.Join(secrets, ", ")
}
// Value returns the secret requests
func (o *SecretOpt) Value() []*types.SecretRequestOption {
return o.values
}
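// exampleSecretOpt is an illustrative sketch, not part of the upstream file,
// parsing a hypothetical --secret flag value in the CSV syntax handled by Set.
func exampleSecretOpt() ([]*types.SecretRequestOption, error) {
var o SecretOpt
// mode is parsed with base 0, so the leading 0 makes 0400 octal (r--------)
if err := o.Set("source=app-key,target=app-key,mode=0400"); err != nil {
return nil, err
}
return o.Value(), nil
}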

111
vendor/github.com/docker/docker/opts/throttledevice.go generated vendored Normal file

@@ -0,0 +1,111 @@
package opts
import (
"fmt"
"strconv"
"strings"
"github.com/docker/docker/api/types/blkiodev"
"github.com/docker/go-units"
)
// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error.
type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error)
// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format.
func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
split := strings.SplitN(val, ":", 2)
if len(split) != 2 {
return nil, fmt.Errorf("bad format: %s", val)
}
if !strings.HasPrefix(split[0], "/dev/") {
return nil, fmt.Errorf("bad format for device path: %s", val)
}
rate, err := units.RAMInBytes(split[1])
if err != nil {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
}
if rate < 0 {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>[<unit>]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val)
}
return &blkiodev.ThrottleDevice{
Path: split[0],
Rate: uint64(rate),
}, nil
}
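// exampleValidateThrottleBpsDevice is an illustrative sketch, not part of the
// upstream file: the rate accepts human-readable units via units.RAMInBytes.
func exampleValidateThrottleBpsDevice() (*blkiodev.ThrottleDevice, error) {
return ValidateThrottleBpsDevice("/dev/sda:1mb") // Path "/dev/sda", Rate 1048576
}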
// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format.
func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
split := strings.SplitN(val, ":", 2)
if len(split) != 2 {
return nil, fmt.Errorf("bad format: %s", val)
}
if !strings.HasPrefix(split[0], "/dev/") {
return nil, fmt.Errorf("bad format for device path: %s", val)
}
rate, err := strconv.ParseUint(split[1], 10, 64)
if err != nil {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
}
if rate < 0 {
return nil, fmt.Errorf("invalid rate for device: %s. The correct format is <device-path>:<number>. Number must be a positive integer", val)
}
return &blkiodev.ThrottleDevice{
Path: split[0],
Rate: uint64(rate),
}, nil
}
// ThrottledeviceOpt defines a map of ThrottleDevices
type ThrottledeviceOpt struct {
values []*blkiodev.ThrottleDevice
validator ValidatorThrottleFctType
}
// NewThrottledeviceOpt creates a new ThrottledeviceOpt
func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt {
values := []*blkiodev.ThrottleDevice{}
return ThrottledeviceOpt{
values: values,
validator: validator,
}
}
// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt
func (opt *ThrottledeviceOpt) Set(val string) error {
var value *blkiodev.ThrottleDevice
if opt.validator != nil {
v, err := opt.validator(val)
if err != nil {
return err
}
value = v
}
(opt.values) = append((opt.values), value)
return nil
}
// String returns ThrottledeviceOpt values as a string.
func (opt *ThrottledeviceOpt) String() string {
var out []string
for _, v := range opt.values {
out = append(out, v.String())
}
return fmt.Sprintf("%v", out)
}
// GetList returns a slice of pointers to ThrottleDevices.
func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice {
var throttledevice []*blkiodev.ThrottleDevice
throttledevice = append(throttledevice, opt.values...)
return throttledevice
}
// Type returns the option type
func (opt *ThrottledeviceOpt) Type() string {
return "list"
}

57
vendor/github.com/docker/docker/opts/ulimit.go generated vendored Normal file

@@ -0,0 +1,57 @@
package opts
import (
"fmt"
"github.com/docker/go-units"
)
// UlimitOpt defines a map of Ulimits
type UlimitOpt struct {
values *map[string]*units.Ulimit
}
// NewUlimitOpt creates a new UlimitOpt
func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt {
if ref == nil {
ref = &map[string]*units.Ulimit{}
}
return &UlimitOpt{ref}
}
// Set validates a Ulimit and sets its name as a key in UlimitOpt
func (o *UlimitOpt) Set(val string) error {
l, err := units.ParseUlimit(val)
if err != nil {
return err
}
(*o.values)[l.Name] = l
return nil
}
// String returns Ulimit values as a string.
func (o *UlimitOpt) String() string {
var out []string
for _, v := range *o.values {
out = append(out, v.String())
}
return fmt.Sprintf("%v", out)
}
// GetList returns a slice of pointers to Ulimits.
func (o *UlimitOpt) GetList() []*units.Ulimit {
var ulimits []*units.Ulimit
for _, v := range *o.values {
ulimits = append(ulimits, v)
}
return ulimits
}
// Type returns the option type
func (o *UlimitOpt) Type() string {
return "ulimit"
}
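// exampleUlimitOpt is an illustrative sketch, not part of the upstream file:
// "name=soft:hard" parses into a units.Ulimit keyed by its name.
func exampleUlimitOpt() ([]*units.Ulimit, error) {
o := NewUlimitOpt(nil)
if err := o.Set("nofile=1024:2048"); err != nil { // soft 1024, hard 2048
return nil, err
}
return o.GetList(), nil
}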

89
vendor/github.com/docker/docker/opts/weightdevice.go generated vendored Normal file

@@ -0,0 +1,89 @@
package opts
import (
"fmt"
"strconv"
"strings"
"github.com/docker/docker/api/types/blkiodev"
)
// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error.
type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error)
// ValidateWeightDevice validates that the specified string has a valid device-weight format.
func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) {
split := strings.SplitN(val, ":", 2)
if len(split) != 2 {
return nil, fmt.Errorf("bad format: %s", val)
}
if !strings.HasPrefix(split[0], "/dev/") {
return nil, fmt.Errorf("bad format for device path: %s", val)
}
weight, err := strconv.ParseUint(split[1], 10, 0)
if err != nil {
return nil, fmt.Errorf("invalid weight for device: %s", val)
}
if weight > 0 && (weight < 10 || weight > 1000) {
return nil, fmt.Errorf("invalid weight for device: %s", val)
}
return &blkiodev.WeightDevice{
Path: split[0],
Weight: uint16(weight),
}, nil
}
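// exampleValidateWeightDevice is an illustrative sketch, not part of the
// upstream file: a weight must be 0 or fall within the 10-1000 range.
func exampleValidateWeightDevice() (*blkiodev.WeightDevice, error) {
return ValidateWeightDevice("/dev/sda:200") // Path "/dev/sda", Weight 200
}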
// WeightdeviceOpt defines a map of WeightDevices
type WeightdeviceOpt struct {
values []*blkiodev.WeightDevice
validator ValidatorWeightFctType
}
// NewWeightdeviceOpt creates a new WeightdeviceOpt
func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt {
values := []*blkiodev.WeightDevice{}
return WeightdeviceOpt{
values: values,
validator: validator,
}
}
// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt
func (opt *WeightdeviceOpt) Set(val string) error {
var value *blkiodev.WeightDevice
if opt.validator != nil {
v, err := opt.validator(val)
if err != nil {
return err
}
value = v
}
(opt.values) = append((opt.values), value)
return nil
}
// String returns WeightdeviceOpt values as a string.
func (opt *WeightdeviceOpt) String() string {
var out []string
for _, v := range opt.values {
out = append(out, v.String())
}
return fmt.Sprintf("%v", out)
}
// GetList returns a slice of pointers to WeightDevices.
func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice {
var weightdevice []*blkiodev.WeightDevice
for _, v := range opt.values {
weightdevice = append(weightdevice, v)
}
return weightdevice
}
// Type returns the option type
func (opt *WeightdeviceOpt) Type() string {
return "list"
}


@@ -0,0 +1 @@
This code provides helper functions for dealing with archive files.

1175
vendor/github.com/docker/docker/pkg/archive/archive.go generated vendored Normal file

File diff suppressed because it is too large


@@ -0,0 +1,92 @@
package archive
import (
"archive/tar"
"os"
"path/filepath"
"strings"
"syscall"
"github.com/docker/docker/pkg/system"
)
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
if format == OverlayWhiteoutFormat {
return overlayWhiteoutConverter{}
}
return nil
}
type overlayWhiteoutConverter struct{}
func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) (wo *tar.Header, err error) {
// convert whiteouts to AUFS format
if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 {
// we just rename the file and make it normal
dir, filename := filepath.Split(hdr.Name)
hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename)
hdr.Mode = 0600
hdr.Typeflag = tar.TypeReg
hdr.Size = 0
}
if fi.Mode()&os.ModeDir != 0 {
// convert opaque dirs to AUFS format by writing an empty file with the prefix
opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque")
if err != nil {
return nil, err
}
if len(opaque) == 1 && opaque[0] == 'y' {
if hdr.Xattrs != nil {
delete(hdr.Xattrs, "trusted.overlay.opaque")
}
// create a header for the whiteout file
// it should inherit some properties from the parent, but be a regular file
wo = &tar.Header{
Typeflag: tar.TypeReg,
Mode: hdr.Mode & int64(os.ModePerm),
Name: filepath.Join(hdr.Name, WhiteoutOpaqueDir),
Size: 0,
Uid: hdr.Uid,
Uname: hdr.Uname,
Gid: hdr.Gid,
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
}
}
return
}
func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) {
base := filepath.Base(path)
dir := filepath.Dir(path)
// if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay
if base == WhiteoutOpaqueDir {
err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0)
// don't write the file itself
return false, err
}
// if a file was deleted and we are using overlay, we need to create a character device
if strings.HasPrefix(base, WhiteoutPrefix) {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil {
return false, err
}
if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil {
return false, err
}
// don't write the file itself
return false, nil
}
return true, nil
}
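
To make the conversions above concrete, here is a standalone sketch of the naming scheme ConvertWrite emits. The two constants are mirrored locally (they are defined in whiteouts.go further down in this diff) so the snippet runs on its own:

package main

import (
	"fmt"
	"path/filepath"
)

// Mirrored from pkg/archive/whiteouts.go for a self-contained example.
const (
	whiteoutPrefix    = ".wh."
	whiteoutOpaqueDir = ".wh..wh..opq"
)

// aufsWhiteoutName applies the rename ConvertWrite performs on an overlay
// whiteout (a 0,0 character device): path/file becomes path/.wh.file.
func aufsWhiteoutName(name string) string {
	dir, file := filepath.Split(name)
	return filepath.Join(dir, whiteoutPrefix+file)
}

func main() {
	fmt.Println(aufsWhiteoutName("a/b/deleted.txt")) // a/b/.wh.deleted.txt
	// An opaque directory instead gains a marker entry inside it:
	fmt.Println(filepath.Join("a/b/opaque", whiteoutOpaqueDir)) // a/b/opaque/.wh..wh..opq
}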

7
vendor/github.com/docker/docker/pkg/archive/archive_other.go generated vendored Normal file
View File

@@ -0,0 +1,7 @@
// +build !linux
package archive
func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter {
return nil
}

118
vendor/github.com/docker/docker/pkg/archive/archive_unix.go generated vendored Normal file
View File

@@ -0,0 +1,118 @@
// +build !windows
package archive
import (
"archive/tar"
"errors"
"os"
"path/filepath"
"syscall"
"github.com/docker/docker/pkg/system"
rsystem "github.com/opencontainers/runc/libcontainer/system"
)
// fixVolumePathPrefix does platform-specific processing so that, if the path
// being passed in is not in a volume path format, it is converted to one.
func fixVolumePathPrefix(srcPath string) string {
return srcPath
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific. On Linux, we
// can't use filepath.Join(srcPath,include) because this will clean away
// a trailing "." or "/" which may be important.
func getWalkRoot(srcPath string, include string) string {
return srcPath + string(filepath.Separator) + include
}
// CanonicalTarNameForPath converts a platform-specific filepath to the
// canonical posix-style path for tar archival. p is a relative path.
func CanonicalTarNameForPath(p string) (string, error) {
return p, nil // already unix-style
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
err = errors.New("cannot convert stat value to syscall.Stat_t")
return
}
inode = uint64(s.Ino)
// Currently Go does not fill in the major/minor numbers
if s.Mode&syscall.S_IFBLK != 0 ||
s.Mode&syscall.S_IFCHR != 0 {
hdr.Devmajor = int64(major(uint64(s.Rdev)))
hdr.Devminor = int64(minor(uint64(s.Rdev)))
}
return
}
func getFileUIDGID(stat interface{}) (int, int, error) {
s, ok := stat.(*syscall.Stat_t)
if !ok {
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
}
return int(s.Uid), int(s.Gid), nil
}
func major(device uint64) uint64 {
return (device >> 8) & 0xfff
}
func minor(device uint64) uint64 {
return (device & 0xff) | ((device >> 12) & 0xfff00)
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
if rsystem.RunningInUserNS() {
// cannot create a device if running in user namespace
return nil
}
mode := uint32(hdr.Mode & 07777)
switch hdr.Typeflag {
case tar.TypeBlock:
mode |= syscall.S_IFBLK
case tar.TypeChar:
mode |= syscall.S_IFCHR
case tar.TypeFifo:
mode |= syscall.S_IFIFO
}
if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil {
return err
}
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
if hdr.Typeflag == tar.TypeLink {
if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
} else if hdr.Typeflag != tar.TypeSymlink {
if err := os.Chmod(path, hdrInfo.Mode()); err != nil {
return err
}
}
return nil
}
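
The major and minor helpers above implement the Linux packing of device numbers into Rdev. A quick round-trip check; mkdev here is a local illustration of the inverse layout, not a function from this package:

package main

import "fmt"

func major(device uint64) uint64 { return (device >> 8) & 0xfff }

func minor(device uint64) uint64 { return (device & 0xff) | ((device >> 12) & 0xfff00) }

// mkdev packs a major/minor pair with the same bit layout, shown only to
// demonstrate that major and minor recover the original numbers.
func mkdev(maj, min uint64) uint64 {
	return (min & 0xff) | ((maj & 0xfff) << 8) | ((min &^ 0xff) << 12)
}

func main() {
	dev := mkdev(8, 1) // the numbers /dev/sda1 typically has on Linux
	fmt.Println(major(dev), minor(dev)) // 8 1
}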

70
vendor/github.com/docker/docker/pkg/archive/archive_windows.go generated vendored Normal file
View File

@@ -0,0 +1,70 @@
// +build windows
package archive
import (
"archive/tar"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/docker/docker/pkg/longpath"
)
// fixVolumePathPrefix does platform-specific processing so that, if the path
// being passed in is not in a volume path format, it is converted to one.
func fixVolumePathPrefix(srcPath string) string {
return longpath.AddPrefix(srcPath)
}
// getWalkRoot calculates the root path when performing a TarWithOptions.
// We use a separate function as this is platform specific.
func getWalkRoot(srcPath string, include string) string {
return filepath.Join(srcPath, include)
}
// CanonicalTarNameForPath converts a platform-specific filepath to the
// canonical posix-style path for tar archival. p is a relative path.
func CanonicalTarNameForPath(p string) (string, error) {
// windows: convert windows-style relative paths with backslashes
// into forward slashes. Since windows does not allow '/' or '\'
// in file names, it is mostly safe to replace; however, we must
// check just in case.
if strings.Contains(p, "/") {
return "", fmt.Errorf("Windows path contains forward slash: %s", p)
}
return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
}
// chmodTarEntry is used to adjust the file permissions used in tar header based
// on the platform the archival is done.
func chmodTarEntry(perm os.FileMode) os.FileMode {
perm &= 0755
// Add the x bit: make everything +x from windows
perm |= 0111
return perm
}
func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) {
// do nothing. no notion of Rdev, Inode, Nlink in stat on Windows
return
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error {
return nil
}
func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
return nil
}
func getFileUIDGID(stat interface{}) (int, int, error) {
// no notion of file ownership mapping yet on Windows
return 0, 0, nil
}
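
A self-contained sketch of the two Windows-specific transforms above, with the logic mirrored locally and a literal backslash standing in for os.PathSeparator so it runs on any platform:

package main

import (
	"fmt"
	"os"
	"strings"
)

// canonicalTarName mirrors CanonicalTarNameForPath: reject paths that already
// contain '/', then turn backslashes into forward slashes.
func canonicalTarName(p string) (string, error) {
	if strings.Contains(p, "/") {
		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
	}
	return strings.Replace(p, `\`, "/", -1), nil
}

// chmodTarEntry mirrors the permission adjustment: clamp to 0755 and add +x.
func chmodTarEntry(perm os.FileMode) os.FileMode {
	perm &= 0755
	perm |= 0111
	return perm
}

func main() {
	name, _ := canonicalTarName(`dir\file.txt`)
	fmt.Println(name)                       // dir/file.txt
	fmt.Printf("%o\n", chmodTarEntry(0644)) // 755
}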

446
vendor/github.com/docker/docker/pkg/archive/changes.go generated vendored Normal file
View File

@@ -0,0 +1,446 @@
package archive
import (
"archive/tar"
"bytes"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strings"
"syscall"
"time"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
)
// ChangeType represents the change type.
type ChangeType int
const (
// ChangeModify represents the modify operation.
ChangeModify = iota
// ChangeAdd represents the add operation.
ChangeAdd
// ChangeDelete represents the delete operation.
ChangeDelete
)
func (c ChangeType) String() string {
switch c {
case ChangeModify:
return "C"
case ChangeAdd:
return "A"
case ChangeDelete:
return "D"
}
return ""
}
// Change represents a change, it wraps the change type and path.
// It describes changes of the files in the path with respect to the
// parent layers. The change could be a modify, an add, or a delete.
// This is used for layer diffs.
type Change struct {
Path string
Kind ChangeType
}
func (change *Change) String() string {
return fmt.Sprintf("%s %s", change.Kind, change.Path)
}
// for sort.Sort
type changesByPath []Change
func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path }
func (c changesByPath) Len() int { return len(c) }
func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
// GNU tar and the Go tar writer don't have sub-second mtime
// precision, which is problematic when we apply changes via tar
// files. We handle this by comparing for exact times, *or* the same
// second count where either a or b has exactly 0 nanoseconds.
func sameFsTime(a, b time.Time) bool {
return a == b ||
(a.Unix() == b.Unix() &&
(a.Nanosecond() == 0 || b.Nanosecond() == 0))
}
func sameFsTimeSpec(a, b syscall.Timespec) bool {
return a.Sec == b.Sec &&
(a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0)
}
// Changes walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func Changes(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip)
}
func aufsMetadataSkip(path string) (skip bool, err error) {
skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path)
if err != nil {
skip = true
}
return
}
func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) {
f := filepath.Base(path)
// If there is a whiteout, then the file was removed
if strings.HasPrefix(f, WhiteoutPrefix) {
originalFile := f[len(WhiteoutPrefix):]
return filepath.Join(filepath.Dir(path), originalFile), nil
}
return "", nil
}
type skipChange func(string) (bool, error)
type deleteChange func(string, string, os.FileInfo) (string, error)
func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) {
var (
changes []Change
changedDirs = make(map[string]struct{})
)
err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
path, err = filepath.Rel(rw, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
path = filepath.Join(string(os.PathSeparator), path)
// Skip root
if path == string(os.PathSeparator) {
return nil
}
if sc != nil {
if skip, err := sc(path); skip {
return err
}
}
change := Change{
Path: path,
}
deletedFile, err := dc(rw, path, f)
if err != nil {
return err
}
// Find out what kind of modification happened
if deletedFile != "" {
change.Path = deletedFile
change.Kind = ChangeDelete
} else {
// Otherwise, the file was added
change.Kind = ChangeAdd
// ...Unless it already existed in a top layer, in which case, it's a modification
for _, layer := range layers {
stat, err := os.Stat(filepath.Join(layer, path))
if err != nil && !os.IsNotExist(err) {
return err
}
if err == nil {
// The file existed in the top layer, so that's a modification
// However, if it's a directory, maybe it wasn't actually modified.
// If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar
if stat.IsDir() && f.IsDir() {
if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) {
// Both directories are the same, don't record the change
return nil
}
}
change.Kind = ChangeModify
break
}
}
}
// If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files.
// This block is here to ensure the change is recorded even if the
// modify time, mode and size of the parent directory in the rw and ro layers are all equal.
// Check https://github.com/docker/docker/pull/13590 for details.
if f.IsDir() {
changedDirs[path] = struct{}{}
}
if change.Kind == ChangeAdd || change.Kind == ChangeDelete {
parent := filepath.Dir(path)
if _, ok := changedDirs[parent]; !ok && parent != "/" {
changes = append(changes, Change{Path: parent, Kind: ChangeModify})
changedDirs[parent] = struct{}{}
}
}
// Record change
changes = append(changes, change)
return nil
})
if err != nil && !os.IsNotExist(err) {
return nil, err
}
return changes, nil
}
// FileInfo describes the information of a file.
type FileInfo struct {
parent *FileInfo
name string
stat *system.StatT
children map[string]*FileInfo
capability []byte
added bool
}
// LookUp looks up the file information of a file.
func (info *FileInfo) LookUp(path string) *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
parent := info
if path == string(os.PathSeparator) {
return info
}
pathElements := strings.Split(path, string(os.PathSeparator))
for _, elem := range pathElements {
if elem != "" {
child := parent.children[elem]
if child == nil {
return nil
}
parent = child
}
}
return parent
}
func (info *FileInfo) path() string {
if info.parent == nil {
// As this runs on the daemon side, file paths are OS specific.
return string(os.PathSeparator)
}
return filepath.Join(info.parent.path(), info.name)
}
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
sizeAtEntry := len(*changes)
if oldInfo == nil {
// add
change := Change{
Path: info.path(),
Kind: ChangeAdd,
}
*changes = append(*changes, change)
info.added = true
}
// We make a copy so we can modify it to detect additions.
// Also, we only recurse on the old dir if the new info is a directory;
// otherwise any previous delete/change is considered recursive.
oldChildren := make(map[string]*FileInfo)
if oldInfo != nil && info.isDir() {
for k, v := range oldInfo.children {
oldChildren[k] = v
}
}
for name, newChild := range info.children {
oldChild := oldChildren[name]
if oldChild != nil {
// change?
oldStat := oldChild.stat
newStat := newChild.stat
// Note: We can't compare inode or ctime or blocksize here, because these change
// when copying a file into a container. However, that is not generally a problem
// because any content change will change mtime, and any status change should
// be visible when actually comparing the stat fields. The only time this
// breaks down is if some code intentionally hides a change by setting
// back mtime
if statDifferent(oldStat, newStat) ||
!bytes.Equal(oldChild.capability, newChild.capability) {
change := Change{
Path: newChild.path(),
Kind: ChangeModify,
}
*changes = append(*changes, change)
newChild.added = true
}
// Remove from copy so we can detect deletions
delete(oldChildren, name)
}
newChild.addChanges(oldChild, changes)
}
for _, oldChild := range oldChildren {
// delete
change := Change{
Path: oldChild.path(),
Kind: ChangeDelete,
}
*changes = append(*changes, change)
}
// If there were changes inside this directory, we need to add it, even if the directory
// itself wasn't changed. This is needed to properly save and restore filesystem permissions.
// As this runs on the daemon side, file paths are OS specific.
if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) {
change := Change{
Path: info.path(),
Kind: ChangeModify,
}
// Let's insert the directory entry before the recently added entries located inside this dir
*changes = append(*changes, change) // just to resize the slice, will be overwritten
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
(*changes)[sizeAtEntry] = change
}
}
// Changes add changes to file information.
func (info *FileInfo) Changes(oldInfo *FileInfo) []Change {
var changes []Change
info.addChanges(oldInfo, &changes)
return changes
}
func newRootFileInfo() *FileInfo {
// As this runs on the daemon side, file paths are OS specific.
root := &FileInfo{
name: string(os.PathSeparator),
children: make(map[string]*FileInfo),
}
return root
}
// ChangesDirs compares two directories and generates an array of Change objects describing the changes.
// If oldDir is "", then all files in newDir will be Add-Changes.
func ChangesDirs(newDir, oldDir string) ([]Change, error) {
var (
oldRoot, newRoot *FileInfo
)
if oldDir == "" {
emptyDir, err := ioutil.TempDir("", "empty")
if err != nil {
return nil, err
}
defer os.Remove(emptyDir)
oldDir = emptyDir
}
oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir)
if err != nil {
return nil, err
}
return newRoot.Changes(oldRoot), nil
}
// ChangesSize calculates the size in bytes of the provided changes, based on newDir.
func ChangesSize(newDir string, changes []Change) int64 {
var (
size int64
sf = make(map[uint64]struct{})
)
for _, change := range changes {
if change.Kind == ChangeModify || change.Kind == ChangeAdd {
file := filepath.Join(newDir, change.Path)
fileInfo, err := os.Lstat(file)
if err != nil {
logrus.Errorf("Can not stat %q: %s", file, err)
continue
}
if fileInfo != nil && !fileInfo.IsDir() {
if hasHardlinks(fileInfo) {
inode := getIno(fileInfo)
if _, ok := sf[inode]; !ok {
size += fileInfo.Size()
sf[inode] = struct{}{}
}
} else {
size += fileInfo.Size()
}
}
}
}
return size
}
// ExportChanges produces an Archive from the provided changes, relative to dir.
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
reader, writer := io.Pipe()
go func() {
ta := &tarAppender{
TarWriter: tar.NewWriter(writer),
Buffer: pools.BufioWriter32KPool.Get(nil),
SeenFiles: make(map[uint64]string),
UIDMaps: uidMaps,
GIDMaps: gidMaps,
}
// this buffer is needed for the duration of this piped stream
defer pools.BufioWriter32KPool.Put(ta.Buffer)
sort.Sort(changesByPath(changes))
// In general we log errors here but ignore them because
// during e.g. a diff operation the container can continue
// mutating the filesystem and we can see transient errors
// from this
for _, change := range changes {
if change.Kind == ChangeDelete {
whiteOutDir := filepath.Dir(change.Path)
whiteOutBase := filepath.Base(change.Path)
whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase)
timestamp := time.Now()
hdr := &tar.Header{
Name: whiteOut[1:],
Size: 0,
ModTime: timestamp,
AccessTime: timestamp,
ChangeTime: timestamp,
}
if err := ta.TarWriter.WriteHeader(hdr); err != nil {
logrus.Debugf("Can't write whiteout header: %s", err)
}
} else {
path := filepath.Join(dir, change.Path)
if err := ta.addTarFile(path, change.Path[1:]); err != nil {
logrus.Debugf("Can't add file %s to tar: %s", path, err)
}
}
}
// Make sure to check the error on Close.
if err := ta.TarWriter.Close(); err != nil {
logrus.Debugf("Can't close layer: %s", err)
}
if err := writer.Close(); err != nil {
logrus.Debugf("failed to close Changes writer: %s", err)
}
}()
return reader, nil
}
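
A minimal usage sketch of the exported entry points; the directory paths are placeholders, and passing nil, nil for the ID maps assumes no user-namespace remapping is in play:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Placeholder directories standing in for a parent layer and its rw copy.
	changes, err := archive.ChangesDirs("/tmp/new", "/tmp/old")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("added/modified bytes:", archive.ChangesSize("/tmp/new", changes))

	rc, err := archive.ExportChanges("/tmp/new", changes, nil, nil)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer rc.Close()
	io.Copy(os.Stdout, rc) // stream the layer tar, whiteouts included
}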

312
vendor/github.com/docker/docker/pkg/archive/changes_linux.go generated vendored Normal file
View File

@@ -0,0 +1,312 @@
package archive
import (
"bytes"
"fmt"
"os"
"path/filepath"
"sort"
"syscall"
"unsafe"
"github.com/docker/docker/pkg/system"
)
// walker is used to implement collectFileInfoForChanges on linux. Where this
// method in general returns the entire contents of two directory trees, we
// optimize some FS calls out on linux. In particular, we take advantage of the
// fact that getdents(2) returns the inode of each file in the directory being
// walked, which, when walking two trees in parallel to generate a list of
// changes, can be used to prune subtrees without ever having to lstat(2) them
// directly. Eliminating stat calls in this way can save seconds on large
// images.
type walker struct {
dir1 string
dir2 string
root1 *FileInfo
root2 *FileInfo
}
// collectFileInfoForChanges returns a complete representation of the trees
// rooted at dir1 and dir2, with one important exception: any subtree or
// leaf where the inode and device numbers are an exact match between dir1
// and dir2 will be pruned from the results. This method is *only* to be used
// for generating a list of changes between the two directories, as it does not
// reflect the full contents.
func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) {
w := &walker{
dir1: dir1,
dir2: dir2,
root1: newRootFileInfo(),
root2: newRootFileInfo(),
}
i1, err := os.Lstat(w.dir1)
if err != nil {
return nil, nil, err
}
i2, err := os.Lstat(w.dir2)
if err != nil {
return nil, nil, err
}
if err := w.walk("/", i1, i2); err != nil {
return nil, nil, err
}
return w.root1, w.root2, nil
}
// Given a FileInfo, its path info, and a reference to the root of the tree
// being constructed, register this file with the tree.
func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error {
if fi == nil {
return nil
}
parent := root.LookUp(filepath.Dir(path))
if parent == nil {
return fmt.Errorf("walkchunk: Unexpectedly no parent for %s", path)
}
info := &FileInfo{
name: filepath.Base(path),
children: make(map[string]*FileInfo),
parent: parent,
}
cpath := filepath.Join(dir, path)
stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t))
if err != nil {
return err
}
info.stat = stat
info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access
parent.children[info.name] = info
return nil
}
// Walk a subtree rooted at the same path in both trees being iterated. For
// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d
func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) {
// Register these nodes with the return trees, unless we're still at the
// (already-created) roots:
if path != "/" {
if err := walkchunk(path, i1, w.dir1, w.root1); err != nil {
return err
}
if err := walkchunk(path, i2, w.dir2, w.root2); err != nil {
return err
}
}
is1Dir := i1 != nil && i1.IsDir()
is2Dir := i2 != nil && i2.IsDir()
sameDevice := false
if i1 != nil && i2 != nil {
si1 := i1.Sys().(*syscall.Stat_t)
si2 := i2.Sys().(*syscall.Stat_t)
if si1.Dev == si2.Dev {
sameDevice = true
}
}
// If these files are both non-existent, or leaves (non-dirs), we are done.
if !is1Dir && !is2Dir {
return nil
}
// Fetch the names of all the files contained in both directories being walked:
var names1, names2 []nameIno
if is1Dir {
names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access
if err != nil {
return err
}
}
if is2Dir {
names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access
if err != nil {
return err
}
}
// We have lists of the files contained in both parallel directories, sorted
// in the same order. Walk them in parallel, generating a unique merged list
// of all items present in either or both directories.
var names []string
ix1 := 0
ix2 := 0
for {
if ix1 >= len(names1) {
break
}
if ix2 >= len(names2) {
break
}
ni1 := names1[ix1]
ni2 := names2[ix2]
switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) {
case -1: // ni1 < ni2 -- advance ni1
// we will not encounter ni1 in names2
names = append(names, ni1.name)
ix1++
case 0: // ni1 == ni2
if ni1.ino != ni2.ino || !sameDevice {
names = append(names, ni1.name)
}
ix1++
ix2++
case 1: // ni1 > ni2 -- advance ni2
// we will not encounter ni2 in names1
names = append(names, ni2.name)
ix2++
}
}
for ix1 < len(names1) {
names = append(names, names1[ix1].name)
ix1++
}
for ix2 < len(names2) {
names = append(names, names2[ix2].name)
ix2++
}
// For each of the names present in either or both of the directories being
// iterated, stat the name under each root, and recurse the pair of them:
for _, name := range names {
fname := filepath.Join(path, name)
var cInfo1, cInfo2 os.FileInfo
if is1Dir {
cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if is2Dir {
cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access
if err != nil && !os.IsNotExist(err) {
return err
}
}
if err = w.walk(fname, cInfo1, cInfo2); err != nil {
return err
}
}
return nil
}
// {name,inode} pairs used to support the early-pruning logic of the walker type
type nameIno struct {
name string
ino uint64
}
type nameInoSlice []nameIno
func (s nameInoSlice) Len() int { return len(s) }
func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name }
// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode
// numbers further up the stack when reading directory contents. Unlike
// os.Readdirnames, which returns a list of filenames, this function returns a
// list of {filename,inode} pairs.
func readdirnames(dirname string) (names []nameIno, err error) {
var (
size = 100
buf = make([]byte, 4096)
nbuf int
bufp int
nb int
)
f, err := os.Open(dirname)
if err != nil {
return nil, err
}
defer f.Close()
names = make([]nameIno, 0, size) // Empty with room to grow.
for {
// Refill the buffer if necessary
if bufp >= nbuf {
bufp = 0
nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux
if nbuf < 0 {
nbuf = 0
}
if err != nil {
return nil, os.NewSyscallError("readdirent", err)
}
if nbuf <= 0 {
break // EOF
}
}
// Drain the buffer
nb, names = parseDirent(buf[bufp:nbuf], names)
bufp += nb
}
sl := nameInoSlice(names)
sort.Sort(sl)
return sl, nil
}
// parseDirent is a minor modification of syscall.ParseDirent (linux version)
// which returns {name,inode} pairs instead of just names.
func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) {
origlen := len(buf)
for len(buf) > 0 {
dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0]))
buf = buf[dirent.Reclen:]
if dirent.Ino == 0 { // File absent in directory.
continue
}
bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0]))
var name = string(bytes[0:clen(bytes[:])])
if name == "." || name == ".." { // Useless names
continue
}
names = append(names, nameIno{name, dirent.Ino})
}
return origlen - len(buf), names
}
func clen(n []byte) int {
for i := 0; i < len(n); i++ {
if n[i] == 0 {
return i
}
}
return len(n)
}
// OverlayChanges walks the path rw and determines changes for the files in the path,
// with respect to the parent layers
func OverlayChanges(layers []string, rw string) ([]Change, error) {
return changes(layers, rw, overlayDeletedFile, nil)
}
func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) {
if fi.Mode()&os.ModeCharDevice != 0 {
s := fi.Sys().(*syscall.Stat_t)
if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 {
return path, nil
}
}
if fi.Mode()&os.ModeDir != 0 {
opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque")
if err != nil {
return "", err
}
if len(opaque) == 1 && opaque[0] == 'y' {
return path, nil
}
}
return "", nil
}
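
The walker's core trick is the merge loop above: two listings already sorted by readdirnames are walked in one pass, and entries whose name and inode match on the same device are dropped, so unchanged subtrees are never lstat(2)ed. A standalone sketch of that merge:

package main

import "fmt"

type nameIno struct {
	name string
	ino  uint64
}

// mergeNames keeps every name present in only one listing, or present in
// both with differing inodes, mirroring the pruning logic in walk above.
func mergeNames(left, right []nameIno, sameDevice bool) []string {
	var names []string
	i, j := 0, 0
	for i < len(left) && j < len(right) {
		switch {
		case left[i].name < right[j].name:
			names = append(names, left[i].name)
			i++
		case left[i].name > right[j].name:
			names = append(names, right[j].name)
			j++
		default: // same name: prune only when inode and device both match
			if left[i].ino != right[j].ino || !sameDevice {
				names = append(names, left[i].name)
			}
			i++
			j++
		}
	}
	for ; i < len(left); i++ {
		names = append(names, left[i].name)
	}
	for ; j < len(right); j++ {
		names = append(names, right[j].name)
	}
	return names
}

func main() {
	old := []nameIno{{"a", 1}, {"b", 2}, {"d", 4}}
	cur := []nameIno{{"a", 1}, {"b", 9}, {"c", 3}}
	fmt.Println(mergeNames(old, cur, true)) // [b c d]
}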

97
vendor/github.com/docker/docker/pkg/archive/changes_other.go generated vendored Normal file
View File

@@ -0,0 +1,97 @@
// +build !linux
package archive
import (
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/docker/docker/pkg/system"
)
func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) {
var (
oldRoot, newRoot *FileInfo
err1, err2 error
errs = make(chan error, 2)
)
go func() {
oldRoot, err1 = collectFileInfo(oldDir)
errs <- err1
}()
go func() {
newRoot, err2 = collectFileInfo(newDir)
errs <- err2
}()
// block until both routines have returned
for i := 0; i < 2; i++ {
if err := <-errs; err != nil {
return nil, nil, err
}
}
return oldRoot, newRoot, nil
}
func collectFileInfo(sourceDir string) (*FileInfo, error) {
root := newRootFileInfo()
err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error {
if err != nil {
return err
}
// Rebase path
relPath, err := filepath.Rel(sourceDir, path)
if err != nil {
return err
}
// As this runs on the daemon side, file paths are OS specific.
relPath = filepath.Join(string(os.PathSeparator), relPath)
// See https://github.com/golang/go/issues/9168 - bug in filepath.Join.
// Temporary workaround. If the returned path starts with two backslashes,
// trim it down to a single backslash. Only relevant on Windows.
if runtime.GOOS == "windows" {
if strings.HasPrefix(relPath, `\\`) {
relPath = relPath[1:]
}
}
if relPath == string(os.PathSeparator) {
return nil
}
parent := root.LookUp(filepath.Dir(relPath))
if parent == nil {
return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath)
}
info := &FileInfo{
name: filepath.Base(relPath),
children: make(map[string]*FileInfo),
parent: parent,
}
s, err := system.Lstat(path)
if err != nil {
return err
}
info.stat = s
info.capability, _ = system.Lgetxattr(path, "security.capability")
parent.children[info.name] = info
return nil
})
if err != nil {
return nil, err
}
return root, nil
}
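
The buffered error channel above is a small fan-out/fan-in idiom: both walks run concurrently and the loop joins them before the roots are read. A generic sketch of the same shape (runBoth and the jobs are illustrative names, not package functions):

package main

import (
	"fmt"
	"strings"
)

// runBoth runs two jobs concurrently and reports the first error, mirroring
// how collectFileInfoForChanges joins its two directory walks.
func runBoth(job1, job2 func() (string, error)) (string, string, error) {
	var r1, r2 string
	var err1, err2 error
	errs := make(chan error, 2) // buffered so neither goroutine blocks on send

	go func() { r1, err1 = job1(); errs <- err1 }()
	go func() { r2, err2 = job2(); errs <- err2 }()

	// Block until both goroutines have reported in.
	for i := 0; i < 2; i++ {
		if err := <-errs; err != nil {
			return "", "", err
		}
	}
	return r1, r2, nil
}

func main() {
	a, b, err := runBoth(
		func() (string, error) { return strings.ToUpper("old"), nil },
		func() (string, error) { return strings.ToLower("NEW"), nil },
	)
	fmt.Println(a, b, err) // OLD new <nil>
}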

36
vendor/github.com/docker/docker/pkg/archive/changes_unix.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
// +build !windows
package archive
import (
"os"
"syscall"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
if oldStat.Mode() != newStat.Mode() ||
oldStat.UID() != newStat.UID() ||
oldStat.GID() != newStat.GID() ||
oldStat.Rdev() != newStat.Rdev() ||
// Don't look at size for dirs, it's not a good measure of change
(oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR &&
(!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0
}
func getIno(fi os.FileInfo) uint64 {
return uint64(fi.Sys().(*syscall.Stat_t).Ino)
}
func hasHardlinks(fi os.FileInfo) bool {
return fi.Sys().(*syscall.Stat_t).Nlink > 1
}

30
vendor/github.com/docker/docker/pkg/archive/changes_windows.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
package archive
import (
"os"
"github.com/docker/docker/pkg/system"
)
func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool {
// Don't look at size for dirs, it's not a good measure of change
if oldStat.ModTime() != newStat.ModTime() ||
oldStat.Mode() != newStat.Mode() ||
oldStat.Size() != newStat.Size() && !oldStat.IsDir() {
return true
}
return false
}
func (info *FileInfo) isDir() bool {
return info.parent == nil || info.stat.IsDir()
}
func getIno(fi os.FileInfo) (inode uint64) {
return
}
func hasHardlinks(fi os.FileInfo) bool {
return false
}

458
vendor/github.com/docker/docker/pkg/archive/copy.go generated vendored Normal file
View File

@@ -0,0 +1,458 @@
package archive
import (
"archive/tar"
"errors"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/system"
)
// Errors used or returned by this file.
var (
ErrNotDirectory = errors.New("not a directory")
ErrDirNotExists = errors.New("no such directory")
ErrCannotCopyDir = errors.New("cannot copy directory")
ErrInvalidCopySource = errors.New("invalid copy source content")
)
// PreserveTrailingDotOrSeparator returns the given cleaned path (after
// processing using any utility functions from the path or filepath stdlib
// packages) and appends a trailing `/.` or `/` if its corresponding original
// path (from before being processed by utility functions from the path or
// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned
// path already ends in a `.` path segment, then another is not added. If the
// clean path already ends in a path separator, then another is not added.
func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string {
// Ensure paths are in platform semantics
cleanedPath = normalizePath(cleanedPath)
originalPath = normalizePath(originalPath)
if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) {
if !hasTrailingPathSeparator(cleanedPath) {
// Add a separator if it doesn't already end with one (a cleaned
// path would only end in a separator if it is the root).
cleanedPath += string(filepath.Separator)
}
cleanedPath += "."
}
if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) {
cleanedPath += string(filepath.Separator)
}
return cleanedPath
}
// assertsDirectory returns whether the given path is
// asserted to be a directory, i.e., the path ends with
// a trailing '/' or `/.`, assuming a path separator of `/`.
func assertsDirectory(path string) bool {
return hasTrailingPathSeparator(path) || specifiesCurrentDir(path)
}
// hasTrailingPathSeparator returns whether the given
// path ends with the system's path separator character.
func hasTrailingPathSeparator(path string) bool {
return len(path) > 0 && os.IsPathSeparator(path[len(path)-1])
}
// specifiesCurrentDir returns whether the given path specifies
// a "current directory", i.e., the last path segment is `.`.
func specifiesCurrentDir(path string) bool {
return filepath.Base(path) == "."
}
// SplitPathDirEntry splits the given path between its directory name and its
// basename by first cleaning the path, but preserving a trailing "." if the
// original path specified the current directory.
func SplitPathDirEntry(path string) (dir, base string) {
cleanedPath := filepath.Clean(normalizePath(path))
if specifiesCurrentDir(path) {
cleanedPath += string(filepath.Separator) + "."
}
return filepath.Dir(cleanedPath), filepath.Base(cleanedPath)
}
// TarResource archives the resource described by the given CopyInfo to a Tar
// archive. A non-nil error is returned if sourcePath does not exist or is
// asserted to be a directory but exists as another type of file.
//
// This function acts as a convenient wrapper around TarWithOptions, which
// requires a directory as the source path. TarResource accepts either a
// directory or a file path and correctly sets the Tar options.
func TarResource(sourceInfo CopyInfo) (content io.ReadCloser, err error) {
return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName)
}
// TarResourceRebase is like TarResource but renames the first path element of
// items in the resulting tar archive to match the given rebaseName if not "".
func TarResourceRebase(sourcePath, rebaseName string) (content io.ReadCloser, err error) {
sourcePath = normalizePath(sourcePath)
if _, err = os.Lstat(sourcePath); err != nil {
// Catches the case where the source does not exist or is not a
// directory if asserted to be a directory, as this also causes an
// error.
return
}
// Separate the source path between its directory and
// the entry in that directory which we are archiving.
sourceDir, sourceBase := SplitPathDirEntry(sourcePath)
filter := []string{sourceBase}
logrus.Debugf("copying %q from %q", sourceBase, sourceDir)
return TarWithOptions(sourceDir, &TarOptions{
Compression: Uncompressed,
IncludeFiles: filter,
IncludeSourceDir: true,
RebaseNames: map[string]string{
sourceBase: rebaseName,
},
})
}
// CopyInfo holds basic info about the source
// or destination path of a copy operation.
type CopyInfo struct {
Path string
Exists bool
IsDir bool
RebaseName string
}
// CopyInfoSourcePath stats the given path to create a CopyInfo
// struct representing that resource for the source of an archive copy
// operation. The given path should be an absolute local path. A source path
// has all symlinks evaluated that appear before the last path separator ("/"
// on Unix). As it is to be a copy source, the path must exist.
func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) {
// normalize the file path and then evaluate the symlink;
// we will use the target file instead of the symlink if
// followLink is set
path = normalizePath(path)
resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink)
if err != nil {
return CopyInfo{}, err
}
stat, err := os.Lstat(resolvedPath)
if err != nil {
return CopyInfo{}, err
}
return CopyInfo{
Path: resolvedPath,
Exists: true,
IsDir: stat.IsDir(),
RebaseName: rebaseName,
}, nil
}
// CopyInfoDestinationPath stats the given path to create a CopyInfo
// struct representing that resource for the destination of an archive copy
// operation. The given path should be an absolute local path.
func CopyInfoDestinationPath(path string) (info CopyInfo, err error) {
maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot.
path = normalizePath(path)
originalPath := path
stat, err := os.Lstat(path)
if err == nil && stat.Mode()&os.ModeSymlink == 0 {
// The path exists and is not a symlink.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// While the path is a symlink.
for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ {
if n > maxSymlinkIter {
// Don't follow symlinks more than this arbitrary number of times.
return CopyInfo{}, errors.New("too many symlinks in " + originalPath)
}
// The path is a symbolic link. We need to evaluate it so that the
// destination of the copy operation is the link target and not the
// link itself. This is notably different than CopyInfoSourcePath which
// only evaluates symlinks before the last appearing path separator.
// Also note that it is okay if the last path element is a broken
// symlink as the copy operation should create the target.
var linkTarget string
linkTarget, err = os.Readlink(path)
if err != nil {
return CopyInfo{}, err
}
if !system.IsAbs(linkTarget) {
// Join with the parent directory.
dstParent, _ := SplitPathDirEntry(path)
linkTarget = filepath.Join(dstParent, linkTarget)
}
path = linkTarget
stat, err = os.Lstat(path)
}
if err != nil {
// It's okay if the destination path doesn't exist. We can still
// continue the copy operation if the parent directory exists.
if !os.IsNotExist(err) {
return CopyInfo{}, err
}
// Ensure destination parent dir exists.
dstParent, _ := SplitPathDirEntry(path)
parentDirStat, err := os.Lstat(dstParent)
if err != nil {
return CopyInfo{}, err
}
if !parentDirStat.IsDir() {
return CopyInfo{}, ErrNotDirectory
}
return CopyInfo{Path: path}, nil
}
// The path exists after resolving symlinks.
return CopyInfo{
Path: path,
Exists: true,
IsDir: stat.IsDir(),
}, nil
}
// PrepareArchiveCopy prepares the given srcContent archive, which should
// contain the archived resource described by srcInfo, to the destination
// described by dstInfo. Returns the possibly modified content archive along
// with the path to the destination directory which it should be extracted to.
func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content io.ReadCloser, err error) {
// Ensure in platform semantics
srcInfo.Path = normalizePath(srcInfo.Path)
dstInfo.Path = normalizePath(dstInfo.Path)
// Separate the destination path between its directory and base
// components in case the source archive contents need to be rebased.
dstDir, dstBase := SplitPathDirEntry(dstInfo.Path)
_, srcBase := SplitPathDirEntry(srcInfo.Path)
switch {
case dstInfo.Exists && dstInfo.IsDir:
// The destination exists as a directory. No alteration
// to srcContent is needed as its contents can be
// simply extracted to the destination directory.
return dstInfo.Path, ioutil.NopCloser(srcContent), nil
case dstInfo.Exists && srcInfo.IsDir:
// The destination exists as some type of file and the source
// content is a directory. This is an error condition since
// you cannot copy a directory to an existing file location.
return "", nil, ErrCannotCopyDir
case dstInfo.Exists:
// The destination exists as some type of file and the source content
// is also a file. The source content entry will have to be renamed to
// have a basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case srcInfo.IsDir:
// The destination does not exist and the source content is an archive
// of a directory. The archive should be extracted to the parent of
// the destination path instead, and when it is, the directory that is
// created as a result should take the name of the destination path.
// The source content entries will have to be renamed to have a
// basename which matches the destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
case assertsDirectory(dstInfo.Path):
// The destination does not exist and is asserted to be created as a
// directory, but the source content is not a directory. This is an
// error condition since you cannot create a directory from a file
// source.
return "", nil, ErrDirNotExists
default:
// The last remaining case is when the destination does not exist, is
// not asserted to be a directory, and the source content is not an
// archive of a directory. In this case, the destination file will need
// to be created when the archive is extracted and the source content
// entry will have to be renamed to have a basename which matches the
// destination path's basename.
if len(srcInfo.RebaseName) != 0 {
srcBase = srcInfo.RebaseName
}
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
}
}
// RebaseArchiveEntries rewrites the given srcContent archive replacing
// an occurrence of oldBase with newBase at the beginning of entry names.
func RebaseArchiveEntries(srcContent io.Reader, oldBase, newBase string) io.ReadCloser {
if oldBase == string(os.PathSeparator) {
// If oldBase specifies the root directory, use an empty string as
// oldBase instead so that newBase doesn't replace the path separator
// that all paths will start with.
oldBase = ""
}
rebased, w := io.Pipe()
go func() {
srcTar := tar.NewReader(srcContent)
rebasedTar := tar.NewWriter(w)
for {
hdr, err := srcTar.Next()
if err == io.EOF {
// Signals end of archive.
rebasedTar.Close()
w.Close()
return
}
if err != nil {
w.CloseWithError(err)
return
}
hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1)
if err = rebasedTar.WriteHeader(hdr); err != nil {
w.CloseWithError(err)
return
}
if _, err = io.Copy(rebasedTar, srcTar); err != nil {
w.CloseWithError(err)
return
}
}
}()
return rebased
}
// CopyResource performs an archive copy from the given source path to the
// given destination path. The source path MUST exist and the destination
// path's parent directory must exist.
func CopyResource(srcPath, dstPath string, followLink bool) error {
var (
srcInfo CopyInfo
err error
)
// Ensure in platform semantics
srcPath = normalizePath(srcPath)
dstPath = normalizePath(dstPath)
// Clean the source and destination paths.
srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath)
dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath)
if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil {
return err
}
content, err := TarResource(srcInfo)
if err != nil {
return err
}
defer content.Close()
return CopyTo(content, srcInfo, dstPath)
}
// CopyTo handles extracting the given content whose
// entries should be sourced from srcInfo to dstPath.
func CopyTo(content io.Reader, srcInfo CopyInfo, dstPath string) error {
// The destination path need not exist, but CopyInfoDestinationPath will
// ensure that at least the parent directory exists.
dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath))
if err != nil {
return err
}
dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo)
if err != nil {
return err
}
defer copyArchive.Close()
options := &TarOptions{
NoLchown: true,
NoOverwriteDirNonDir: true,
}
return Untar(copyArchive, dstDir, options)
}
// ResolveHostSourcePath decides which real path needs to be copied, based on
// parameters such as whether to follow a symlink. If followLink is true,
// resolvedPath will be the link target of any symlink file; otherwise only
// symlinks in the parent directory are resolved, and a symlink file itself is
// returned without being resolved.
func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) {
if followLink {
resolvedPath, err = filepath.EvalSymlinks(path)
if err != nil {
return
}
resolvedPath, rebaseName = GetRebaseName(path, resolvedPath)
} else {
dirPath, basePath := filepath.Split(path)
// when not following symlinks, resolve only the symlinks in the parent dir
var resolvedDirPath string
resolvedDirPath, err = filepath.EvalSymlinks(dirPath)
if err != nil {
return
}
// resolvedDirPath will have been cleaned (no trailing path separators) so
// we can manually join it with the base path element.
resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) {
rebaseName = filepath.Base(path)
}
}
return resolvedPath, rebaseName, nil
}
// GetRebaseName normalizes and compares path and resolvedPath, and returns
// the completed resolved path and the rebased file name
func GetRebaseName(path, resolvedPath string) (string, string) {
// resolvedPath will have been cleaned (no trailing path separator or dot) so
// we can manually re-append them below
var rebaseName string
if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) {
resolvedPath += string(filepath.Separator) + "."
}
if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) {
resolvedPath += string(filepath.Separator)
}
if filepath.Base(path) != filepath.Base(resolvedPath) {
// In the case where the path had a trailing separator and a symlink
// evaluation has changed the last path component, we will need to
// rebase the name in the archive that is being copied to match the
// originally requested name.
rebaseName = filepath.Base(path)
}
return resolvedPath, rebaseName
}
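
Because a trailing "/" or "/." decides directory-versus-contents semantics during a copy, the helpers above are easiest to understand by example (a sketch with Unix-style separators; outputs shown in comments):

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// filepath.Clean strips the suffix; PreserveTrailingDotOrSeparator restores it.
	orig := "/var/log/"
	fmt.Println(archive.PreserveTrailingDotOrSeparator(filepath.Clean(orig), orig)) // /var/log/

	orig = "/var/log/."
	fmt.Println(archive.PreserveTrailingDotOrSeparator(filepath.Clean(orig), orig)) // /var/log/.

	// SplitPathDirEntry keeps the trailing "." visible in the basename.
	dir, base := archive.SplitPathDirEntry("/var/log/.")
	fmt.Println(dir, base) // /var/log .
}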

11
vendor/github.com/docker/docker/pkg/archive/copy_unix.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
// +build !windows
package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.ToSlash(path)
}

9
vendor/github.com/docker/docker/pkg/archive/copy_windows.go generated vendored Normal file
View File

@@ -0,0 +1,9 @@
package archive
import (
"path/filepath"
)
func normalizePath(path string) string {
return filepath.FromSlash(path)
}

279
vendor/github.com/docker/docker/pkg/archive/diff.go generated vendored Normal file
View File

@@ -0,0 +1,279 @@
package archive
import (
"archive/tar"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/idtools"
"github.com/docker/docker/pkg/pools"
"github.com/docker/docker/pkg/system"
)
// UnpackLayer unpacks `layer` to `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
var dirs []*tar.Header
unpackedPaths := make(map[string]struct{})
if options == nil {
options = &TarOptions{}
}
if options.ExcludePatterns == nil {
options.ExcludePatterns = []string{}
}
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
if err != nil {
return 0, err
}
aufsTempdir := ""
aufsHardlinks := make(map[string]*tar.Header)
// Iterate through the files in the archive.
for {
hdr, err := tr.Next()
if err == io.EOF {
// end of tar archive
break
}
if err != nil {
return 0, err
}
size += hdr.Size
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
// Windows does not support filenames with colons in them. Ignore
// these files. This is not a problem though (although it might
// appear that it is). Let's suppose a client is running docker pull.
// The daemon it points to is Windows. Would it make sense for the
// client to be doing a docker pull Ubuntu for example (which has files
// with colons in the name under /usr/share/man/man3)? No, absolutely
// not as it would really only make sense that they were pulling a
// Windows image. However, for development, it is necessary to be able
// to pull Linux images which are in the repository.
//
// TODO Windows. Once the registry is aware of what images are Windows-
// specific or Linux-specific, this warning should be changed to an error
// to cater for the situation where someone does manage to upload a Linux
// image but have it tagged as Windows inadvertently.
if runtime.GOOS == "windows" {
if strings.Contains(hdr.Name, ":") {
logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name)
continue
}
}
// Note: as these operations are platform specific, the slash must be too.
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
// Not the root directory, ensure that the parent directory exists.
// This happened in some tests where an image had a tarfile without any
// parent directories.
parent := filepath.Dir(hdr.Name)
parentPath := filepath.Join(dest, parent)
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = system.MkdirAll(parentPath, 0600)
if err != nil {
return 0, err
}
}
}
// Skip AUFS metadata dirs
if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
// Regular files inside /.wh..wh.plnk can be used as hardlink targets
// We don't want this directory, but we need the files in them so that
// such hardlinks can be resolved.
if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
basename := filepath.Base(hdr.Name)
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil, options.InUserNS); err != nil {
return 0, err
}
}
if hdr.Name != WhiteoutOpaqueDir {
continue
}
}
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
return 0, err
}
// Note: as these operations are platform specific, the slash must be too.
if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
if strings.HasPrefix(base, WhiteoutPrefix) {
dir := filepath.Dir(path)
if base == WhiteoutOpaqueDir {
_, err := os.Lstat(dir)
if err != nil {
return 0, err
}
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
if os.IsNotExist(err) {
err = nil // parent was deleted
}
return err
}
if path == dir {
return nil
}
if _, exists := unpackedPaths[path]; !exists {
err := os.RemoveAll(path)
return err
}
return nil
})
if err != nil {
return 0, err
}
} else {
originalBase := base[len(WhiteoutPrefix):]
originalPath := filepath.Join(dir, originalBase)
if err := os.RemoveAll(originalPath); err != nil {
return 0, err
}
}
} else {
// If the path exists, we almost always just want to remove and replace it.
// The only exception is when it is a directory *and* the file from
// the layer is also a directory. Then we want to merge them (i.e.
// just apply the metadata from the layer).
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
return 0, err
}
}
}
trBuf.Reset(tr)
srcData := io.Reader(trBuf)
srcHdr := hdr
// Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so
// we manually retarget these into the temporary files we extracted them into
if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
// if the options contain uid & gid maps, convert header uid/gid
// entries using the maps such that lchown sets the proper mapped
// uid/gid after writing the file. We only perform this mapping if
// the file isn't already owned by the remapped root UID or GID, as
// that specific uid/gid has no mapping from container -> host, and
// those files already have the proper ownership for inside the
// container.
if srcHdr.Uid != remappedRootUID {
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
if err != nil {
return 0, err
}
srcHdr.Uid = xUID
}
if srcHdr.Gid != remappedRootGID {
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
if err != nil {
return 0, err
}
srcHdr.Gid = xGID
}
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
return 0, err
}
// Directory mtimes must be handled at the end to avoid further
// file creation in them from modifying the directory mtime
if hdr.Typeflag == tar.TypeDir {
dirs = append(dirs, hdr)
}
unpackedPaths[path] = struct{}{}
}
}
for _, hdr := range dirs {
path := filepath.Join(dest, hdr.Name)
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err
}
}
return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`,
// and applies it to the directory `dest`. The stream `layer` can be
// compressed or uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyLayer(dest string, layer io.Reader) (int64, error) {
return applyLayerHandler(dest, layer, &TarOptions{}, true)
}
// ApplyUncompressedLayer parses a diff in the standard layer format from
// `layer`, and applies it to the directory `dest`. The stream `layer`
// can only be uncompressed.
// Returns the size in bytes of the contents of the layer.
func ApplyUncompressedLayer(dest string, layer io.Reader, options *TarOptions) (int64, error) {
return applyLayerHandler(dest, layer, options, false)
}
// do the bulk load of ApplyLayer, but allow for not calling DecompressStream
func applyLayerHandler(dest string, layer io.Reader, options *TarOptions, decompress bool) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
oldmask, err := system.Umask(0)
if err != nil {
return 0, err
}
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
if decompress {
layer, err = DecompressStream(layer)
if err != nil {
return 0, err
}
}
return UnpackLayer(dest, layer, options)
}
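
A short usage sketch; the paths are placeholders, and ApplyLayer detects compression itself via DecompressStream:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	f, err := os.Open("/tmp/layer.tar.gz") // placeholder layer stream
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	defer f.Close()

	size, err := archive.ApplyLayer("/tmp/rootfs", f)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Println("unpacked bytes:", size)
}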

97
vendor/github.com/docker/docker/pkg/archive/example_changes.go generated vendored Normal file
View File

@@ -0,0 +1,97 @@
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/Sirupsen/logrus"
"github.com/docker/docker/pkg/archive"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if len(os.Getenv("DEBUG")) > 0 || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
var err error
oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes, nil, nil)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}

16
vendor/github.com/docker/docker/pkg/archive/time_linux.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
if time.IsZero() {
// Return UTIME_OMIT special value
ts.Sec = 0
ts.Nsec = ((1 << 30) - 2)
return
}
return syscall.NsecToTimespec(time.UnixNano())
}

16
vendor/github.com/docker/docker/pkg/archive/time_unsupported.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
// +build !linux
package archive
import (
"syscall"
"time"
)
func timeToTimespec(time time.Time) (ts syscall.Timespec) {
nsec := int64(0)
if !time.IsZero() {
nsec = time.UnixNano()
}
return syscall.NsecToTimespec(nsec)
}

23
vendor/github.com/docker/docker/pkg/archive/whiteouts.go generated vendored Normal file
View File

@@ -0,0 +1,23 @@
package archive
// Whiteouts are files with a special meaning for the layered filesystem.
// Docker uses AUFS whiteout files inside exported archives. In other
// filesystems these files are generated/handled on tar creation/extraction.
// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a
// filename this means that file has been removed from the base layer.
const WhiteoutPrefix = ".wh."
// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not
// for removing an actual file. Normally these files are excluded from exported
// archives.
const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix
// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other
// layers. Normally these should not go into exported archives and all changed
// hardlinks should be copied to the top layer.
const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk"
// WhiteoutOpaqueDir file means directory has been made opaque - meaning
// readdir calls to this directory do not follow to lower layers.
const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq"
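
A short sketch, using only the exported constants above, of how a layer consumer might classify tar entry names; the classify helper is hypothetical and not part of this package.

package main

import (
	"fmt"
	"path/filepath"
	"strings"

	"github.com/docker/docker/pkg/archive"
)

// classify is a hypothetical helper mapping a tar entry name to the
// whiteout semantics defined by the constants above.
func classify(name string) string {
	base := filepath.Base(name)
	dir := filepath.Dir(name)
	switch {
	case base == archive.WhiteoutOpaqueDir:
		return "opaque marker: hide lower layers under " + dir
	case strings.HasPrefix(base, archive.WhiteoutMetaPrefix):
		return "metadata whiteout (no file removal)"
	case strings.HasPrefix(base, archive.WhiteoutPrefix):
		return "remove " + filepath.Join(dir, strings.TrimPrefix(base, archive.WhiteoutPrefix))
	default:
		return "regular entry"
	}
}

func main() {
	for _, n := range []string{"etc/.wh.passwd", "var/.wh..wh..opq", "usr/bin/ls"} {
		fmt.Printf("%-18s -> %s\n", n, classify(n))
	}
}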

59
vendor/github.com/docker/docker/pkg/archive/wrap.go generated vendored Normal file

@@ -0,0 +1,59 @@
package archive
import (
"archive/tar"
"bytes"
"io"
)
// Generate generates a new archive from the content provided
// as input.
//
// `files` is a sequence of path/content pairs. A new file is
// added to the archive for each pair.
// If the last pair is incomplete, the file is created with an
// empty content. For example:
//
// Generate("foo.txt", "hello world", "emptyfile")
//
// The above call will return an archive with 2 files:
// * ./foo.txt with content "hello world"
// * ./emptyfile with empty content
//
// FIXME: stream content instead of buffering
// FIXME: specify permissions and other archive metadata
func Generate(input ...string) (io.Reader, error) {
files := parseStringPairs(input...)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
for _, file := range files {
name, content := file[0], file[1]
hdr := &tar.Header{
Name: name,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(content)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return buf, nil
}
func parseStringPairs(input ...string) (output [][2]string) {
output = make([][2]string, 0, len(input)/2+1)
for i := 0; i < len(input); i += 2 {
var pair [2]string
pair[0] = input[i]
if i+1 < len(input) {
pair[1] = input[i+1]
}
output = append(output, pair)
}
return
}
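
A usage sketch for Generate, based only on the doc comment above: build the two-file archive and list it back with the standard library's archive/tar.

package main

import (
	"archive/tar"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	r, err := archive.Generate("foo.txt", "hello world", "emptyfile")
	if err != nil {
		panic(err)
	}
	tr := tar.NewReader(r)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			panic(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}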


@@ -0,0 +1,285 @@
package fileutils
import (
"errors"
"fmt"
"io"
"os"
"path/filepath"
"regexp"
"strings"
"text/scanner"
"github.com/Sirupsen/logrus"
)
// exclusion returns true if the specified pattern is an exclusion
func exclusion(pattern string) bool {
return pattern[0] == '!'
}
// empty returns true if the specified pattern is empty
func empty(pattern string) bool {
return pattern == ""
}
// CleanPatterns takes a slice of patterns and returns a new
// slice of patterns cleaned with filepath.Clean, stripped
// of any empty patterns and lets the caller know whether the
// slice contains any exception patterns (prefixed with !).
func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) {
// Loop over exclusion patterns and:
// 1. Clean them up.
// 2. Indicate whether we are dealing with any exception rules.
// 3. Error if we see a single exclusion marker on its own (!).
cleanedPatterns := []string{}
patternDirs := [][]string{}
exceptions := false
for _, pattern := range patterns {
// Eliminate leading and trailing whitespace.
pattern = strings.TrimSpace(pattern)
if empty(pattern) {
continue
}
if exclusion(pattern) {
if len(pattern) == 1 {
return nil, nil, false, errors.New("Illegal exclusion pattern: !")
}
exceptions = true
}
pattern = filepath.Clean(pattern)
cleanedPatterns = append(cleanedPatterns, pattern)
if exclusion(pattern) {
pattern = pattern[1:]
}
patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator)))
}
return cleanedPatterns, patternDirs, exceptions, nil
}
// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func Matches(file string, patterns []string) (bool, error) {
file = filepath.Clean(file)
if file == "." {
// Don't let them exclude everything, kind of silly.
return false, nil
}
patterns, patDirs, _, err := CleanPatterns(patterns)
if err != nil {
return false, err
}
return OptimizedMatches(file, patterns, patDirs)
}
// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go.
// It will assume that the inputs have been preprocessed and therefore the function
// doesn't need to do as much error checking and clean-up. This was done to avoid
// repeating these steps on each file being checked during the archive process.
// The more generic fileutils.Matches() can't make these assumptions.
func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
parentPath := filepath.Dir(file)
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
for i, pattern := range patterns {
negative := false
if exclusion(pattern) {
negative = true
pattern = pattern[1:]
}
match, err := regexpMatch(pattern, file)
if err != nil {
return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err)
}
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(patDirs[i]) <= len(parentPathDirs) {
match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)),
strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator)))
}
}
if match {
matched = !negative
}
}
if matched {
logrus.Debugf("Skipping excluded path: %s", file)
}
return matched, nil
}
// regexpMatch tries to match the logic of filepath.Match but
// does so using regexp logic. We do this so that we can expand the
// wildcard set to include other things, like "**" to mean any number
// of directories. This means that we should be backwards compatible
// with filepath.Match(). We'll end up supporting more stuff, due to
// the fact that we're using regexp, but that's ok - it does no harm.
//
// As per the comment in Go's filepath.Match, on Windows, escaping
// is disabled. Instead, '\\' is treated as path separator.
func regexpMatch(pattern, path string) (bool, error) {
regStr := "^"
// Do some syntax checking on the pattern.
// filepath's Match() has some really weird rules that are inconsistent
// so instead of trying to dup their logic, just call Match() for its
// error state and if there is an error in the pattern return it.
// If this becomes an issue we can remove this since it's really only
// needed in the error (syntax) case - which isn't really critical.
if _, err := filepath.Match(pattern, path); err != nil {
return false, err
}
// Go through the pattern and convert it to a regexp.
// We use a scanner so we can support utf-8 chars.
var scan scanner.Scanner
scan.Init(strings.NewReader(pattern))
sl := string(os.PathSeparator)
escSL := sl
if sl == `\` {
escSL += `\`
}
for scan.Peek() != scanner.EOF {
ch := scan.Next()
if ch == '*' {
if scan.Peek() == '*' {
// is some flavor of "**"
scan.Next()
// Treat **/ as ** so eat the "/"
if string(scan.Peek()) == sl {
scan.Next()
}
if scan.Peek() == scanner.EOF {
// is "**EOF" - to align with .gitignore just accept all
regStr += ".*"
} else {
// is "**"
// Note that this allows for any # of /'s (even 0) because
// the .* will eat everything, even /'s
regStr += "(.*" + escSL + ")?"
}
} else {
// is "*" so map it to anything but "/"
regStr += "[^" + escSL + "]*"
}
} else if ch == '?' {
// "?" is any char except "/"
regStr += "[^" + escSL + "]"
} else if ch == '.' || ch == '$' {
// Escape some regexp special chars that have no meaning
// in golang's filepath.Match
regStr += `\` + string(ch)
} else if ch == '\\' {
// escape next char. Note that a trailing \ in the pattern
// will be left alone (but need to escape it)
if sl == `\` {
// On windows map "\" to "\\", meaning an escaped backslash,
// and then just continue because filepath.Match on
// Windows doesn't allow escaping at all
regStr += escSL
continue
}
if scan.Peek() != scanner.EOF {
regStr += `\` + string(scan.Next())
} else {
regStr += `\`
}
} else {
regStr += string(ch)
}
}
regStr += "$"
res, err := regexp.MatchString(regStr, path)
// Map regexp's error to filepath's so no one knows we're not using filepath
if err != nil {
err = filepath.ErrBadPattern
}
return res, err
}
// CopyFile copies from src to dst until either EOF is reached
// on src or an error occurs. It verifies src exists and removes
// the dst if it exists.
func CopyFile(src, dst string) (int64, error) {
cleanSrc := filepath.Clean(src)
cleanDst := filepath.Clean(dst)
if cleanSrc == cleanDst {
return 0, nil
}
sf, err := os.Open(cleanSrc)
if err != nil {
return 0, err
}
defer sf.Close()
if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
return 0, err
}
df, err := os.Create(cleanDst)
if err != nil {
return 0, err
}
defer df.Close()
return io.Copy(df, sf)
}
// ReadSymlinkedDirectory returns the target directory of a symlink.
// The target of the symbolic link must be a directory, not a file.
func ReadSymlinkedDirectory(path string) (string, error) {
var realPath string
var err error
if realPath, err = filepath.Abs(path); err != nil {
return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
}
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
}
realPathInfo, err := os.Stat(realPath)
if err != nil {
return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
}
if !realPathInfo.Mode().IsDir() {
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
}
return realPath, nil
}
// CreateIfNotExists creates a file or a directory only if it does not already exist.
func CreateIfNotExists(path string, isDir bool) error {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
if isDir {
return os.MkdirAll(path, 0755)
}
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
return err
}
f, err := os.OpenFile(path, os.O_CREATE, 0755)
if err != nil {
return err
}
f.Close()
}
}
return nil
}
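
A usage sketch for Matches exercising the regexp-backed "**" extension and a "!" exception, per the semantics described above; the patterns and file names are invented for illustration.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	// "docs/**" excludes everything under docs, but the "!" exception
	// re-includes docs/README.md.
	patterns := []string{"docs/**", "!docs/README.md"}
	for _, f := range []string{"docs/api/v1.md", "docs/README.md", "main.go"} {
		m, err := fileutils.Matches(f, patterns)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%-18s excluded=%v\n", f, m)
	}
}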


@@ -0,0 +1,27 @@
package fileutils
import (
"os"
"os/exec"
"strconv"
"strings"
)
// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
pid := os.Getpid()
cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
output, err := cmd.CombinedOutput()
if err != nil {
return -1
}
outputStr := strings.TrimSpace(string(output))
fds := strings.Split(outputStr, "\n")
return len(fds) - 1
}


@@ -0,0 +1,7 @@
package fileutils
// GetTotalUsedFds returns the number of used file descriptors.
// On Solaris these limits are per process and not systemwide.
func GetTotalUsedFds() int {
return -1
}


@@ -0,0 +1,22 @@
// +build linux freebsd

package fileutils
import (
"fmt"
"io/ioutil"
"os"
"github.com/Sirupsen/logrus"
)
// GetTotalUsedFds returns the number of used file descriptors by
// reading them via the /proc filesystem.
func GetTotalUsedFds() int {
if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
} else {
return len(fds)
}
return -1
}


@@ -0,0 +1,7 @@
package fileutils
// GetTotalUsedFds returns the number of used file descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
return -1
}

39
vendor/github.com/docker/docker/pkg/homedir/homedir.go generated vendored Normal file

@@ -0,0 +1,39 @@
package homedir
import (
"os"
"runtime"
"github.com/opencontainers/runc/libcontainer/user"
)
// Key returns the env var name for the user's home dir based on
// the platform being run on
func Key() string {
if runtime.GOOS == "windows" {
return "USERPROFILE"
}
return "HOME"
}
// Get returns the home directory of the current user with the help of
// environment variables depending on the target operating system.
// Returned path should be used with "path/filepath" to form new paths.
func Get() string {
home := os.Getenv(Key())
if home == "" && runtime.GOOS != "windows" {
if u, err := user.CurrentUser(); err == nil {
return u.Home
}
}
return home
}
// GetShortcutString returns the string that is shortcut to user's home directory
// in the native shell of the platform running on.
func GetShortcutString() string {
if runtime.GOOS == "windows" {
return "%USERPROFILE%" // be careful while using in format functions
}
return "~"
}
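
A small usage sketch combining the three helpers; joining .docker/config.json onto the result is just an example of the path/filepath usage the doc comment recommends.

package main

import (
	"fmt"
	"path/filepath"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	home := homedir.Get() // e.g. /home/alice, or %USERPROFILE% contents on Windows
	cfg := filepath.Join(home, ".docker", "config.json")
	fmt.Println("config lives at:", cfg)
	fmt.Println("home env key is:", homedir.Key())
	fmt.Println("shell shortcut is:", homedir.GetShortcutString())
}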


@@ -0,0 +1,23 @@
// +build linux

package homedir
import (
"os"
"github.com/docker/docker/pkg/idtools"
)
// GetStatic returns the home directory for the current user without calling
// os/user.Current(). This is useful for static-linked binary on glibc-based
// system, because a call to os/user.Current() in a static binary leads to
// segfault due to a glibc issue that won't be fixed in a short term.
// (#29344, golang/go#13470, https://sourceware.org/bugzilla/show_bug.cgi?id=19341)
func GetStatic() (string, error) {
uid := os.Getuid()
usr, err := idtools.LookupUID(uid)
if err != nil {
return "", err
}
return usr.Home, nil
}


@@ -0,0 +1,13 @@
// +build !linux

package homedir
import (
"errors"
)
// GetStatic is not needed for non-linux systems.
// (Precisely, it is needed only for glibc-based linux systems.)
func GetStatic() (string, error) {
return "", errors.New("homedir.GetStatic() is not supported on this system")
}

197
vendor/github.com/docker/docker/pkg/idtools/idtools.go generated vendored Normal file

@@ -0,0 +1,197 @@
package idtools
import (
"bufio"
"fmt"
"os"
"sort"
"strconv"
"strings"
)
// IDMap contains a single entry for user namespace range remapping. An array
// of IDMap entries represents the structure that will be provided to the Linux
// kernel for creating a user namespace.
type IDMap struct {
ContainerID int `json:"container_id"`
HostID int `json:"host_id"`
Size int `json:"size"`
}
type subIDRange struct {
Start int
Length int
}
type ranges []subIDRange
func (e ranges) Len() int { return len(e) }
func (e ranges) Swap(i, j int) { e[i], e[j] = e[j], e[i] }
func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
const (
subuidFileName string = "/etc/subuid"
subgidFileName string = "/etc/subgid"
)
// MkdirAllAs creates a directory (including any along the path) and then modifies
// ownership to the requested uid/gid. If the directory already exists, this
// function will still change ownership to the requested uid/gid pair.
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
}
// MkdirAllNewAs creates a directory (including any along the path) and then modifies
// ownership ONLY of newly created directories to the requested uid/gid. If the
// directories along the path exist, no change of ownership will be performed
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
}
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
// If the directory already exists, this function still changes ownership
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
}
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
var uid, gid int
if uidMap != nil {
xUID, err := ToHost(0, uidMap)
if err != nil {
return -1, -1, err
}
uid = xUID
}
if gidMap != nil {
xGID, err := ToHost(0, gidMap)
if err != nil {
return -1, -1, err
}
gid = xGID
}
return uid, gid, nil
}
// ToContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func ToContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
for _, m := range idMap {
if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
contID := m.ContainerID + (hostID - m.HostID)
return contID, nil
}
}
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
}
// ToHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id #
func ToHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
for _, m := range idMap {
if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
hostID := m.HostID + (contID - m.ContainerID)
return hostID, nil
}
}
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
}
// CreateIDMappings takes a requested user and group name and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
subuidRanges, err := parseSubuid(username)
if err != nil {
return nil, nil, err
}
subgidRanges, err := parseSubgid(groupname)
if err != nil {
return nil, nil, err
}
if len(subuidRanges) == 0 {
return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
}
if len(subgidRanges) == 0 {
return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
}
return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
}
func createIDMap(subidRanges ranges) []IDMap {
idMap := []IDMap{}
// sort the ranges by lowest ID first
sort.Sort(subidRanges)
containerID := 0
for _, idrange := range subidRanges {
idMap = append(idMap, IDMap{
ContainerID: containerID,
HostID: idrange.Start,
Size: idrange.Length,
})
containerID = containerID + idrange.Length
}
return idMap
}
func parseSubuid(username string) (ranges, error) {
return parseSubidFile(subuidFileName, username)
}
func parseSubgid(username string) (ranges, error) {
return parseSubidFile(subgidFileName, username)
}
// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
// and return all found ranges for a specified username. If the special value
// "ALL" is supplied for username, then all ranges in the file will be returned
func parseSubidFile(path, username string) (ranges, error) {
var rangeList ranges
subidFile, err := os.Open(path)
if err != nil {
return rangeList, err
}
defer subidFile.Close()
s := bufio.NewScanner(subidFile)
for s.Scan() {
if err := s.Err(); err != nil {
return rangeList, err
}
text := strings.TrimSpace(s.Text())
if text == "" || strings.HasPrefix(text, "#") {
continue
}
parts := strings.Split(text, ":")
if len(parts) != 3 {
return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path)
}
if parts[0] == username || username == "ALL" {
startid, err := strconv.Atoi(parts[1])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
length, err := strconv.Atoi(parts[2])
if err != nil {
return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err)
}
rangeList = append(rangeList, subIDRange{startid, length})
}
}
return rangeList, nil
}
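
A sketch of the translation helpers with a single remapped range, following the 1-to-1-unless-mapped rules documented above; the 100000/65536 values echo the subordinate-range defaults used elsewhere in this package.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Container IDs 0-65535 map onto host IDs starting at 100000.
	uidMap := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	host, err := idtools.ToHost(0, uidMap) // container root
	if err != nil {
		panic(err)
	}
	fmt.Println("container uid 0 -> host uid", host) // 100000

	cont, err := idtools.ToContainer(100123, uidMap)
	if err != nil {
		panic(err)
	}
	fmt.Println("host uid 100123 -> container uid", cont) // 123

	// IDs outside every range are an error, not a passthrough.
	if _, err := idtools.ToContainer(42, uidMap); err != nil {
		fmt.Println("unmapped:", err)
	}
}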


@@ -0,0 +1,204 @@
// +build !windows

package idtools
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"sync"
"github.com/docker/docker/pkg/system"
"github.com/opencontainers/runc/libcontainer/user"
)
var (
entOnce sync.Once
getentCmd string
)
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If chownExisting is false, we won't
// chown the full directory path if it exists
var paths []string
if _, err := os.Stat(path); err != nil && os.IsNotExist(err) {
paths = []string{path}
} else if err == nil && chownExisting {
// short-circuit--we were called with an existing directory and chown was requested
return os.Chown(path, ownerUID, ownerGID)
} else if err == nil {
// nothing to do; directory path fully exists already and chown was NOT requested
return nil
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
} else {
if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) {
return err
}
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil {
return err
}
}
return nil
}
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
func CanAccess(path string, uid, gid int) bool {
statInfo, err := system.Stat(path)
if err != nil {
return false
}
fileMode := os.FileMode(statInfo.Mode())
permBits := fileMode.Perm()
return accessible(statInfo.UID() == uint32(uid),
statInfo.GID() == uint32(gid), permBits)
}
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
if isOwner && (perms&0100 == 0100) {
return true
}
if isGroup && (perms&0010 == 0010) {
return true
}
if perms&0001 == 0001 {
return true
}
return false
}
// LookupUser uses traditional local system files lookup (from libcontainer/user) on a username,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUser(username string) (user.User, error) {
// first try a local system files lookup using existing capabilities
usr, err := user.LookupUser(username)
if err == nil {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
usr, err = getentUser(fmt.Sprintf("%s %s", "passwd", username))
if err != nil {
return user.User{}, err
}
return usr, nil
}
// LookupUID uses traditional local system files lookup (from libcontainer/user) on a uid,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupUID(uid int) (user.User, error) {
// first try a local system files lookup using existing capabilities
usr, err := user.LookupUid(uid)
if err == nil {
return usr, nil
}
// local files lookup failed; attempt to call `getent` to query configured passwd dbs
return getentUser(fmt.Sprintf("%s %d", "passwd", uid))
}
func getentUser(args string) (user.User, error) {
reader, err := callGetent(args)
if err != nil {
return user.User{}, err
}
users, err := user.ParsePasswd(reader)
if err != nil {
return user.User{}, err
}
if len(users) == 0 {
return user.User{}, fmt.Errorf("getent failed to find passwd entry for %q", strings.Split(args, " ")[1])
}
return users[0], nil
}
// LookupGroup uses traditional local system files lookup (from libcontainer/user) on a group name,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGroup(groupname string) (user.Group, error) {
// first try a local system files lookup using existing capabilities
group, err := user.LookupGroup(groupname)
if err == nil {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
return getentGroup(fmt.Sprintf("%s %s", "group", groupname))
}
// LookupGID uses traditional local system files lookup (from libcontainer/user) on a group ID,
// followed by a call to `getent` for supporting host configured non-files passwd and group dbs
func LookupGID(gid int) (user.Group, error) {
// first try a local system files lookup using existing capabilities
group, err := user.LookupGid(gid)
if err == nil {
return group, nil
}
// local files lookup failed; attempt to call `getent` to query configured group dbs
return getentGroup(fmt.Sprintf("%s %d", "group", gid))
}
func getentGroup(args string) (user.Group, error) {
reader, err := callGetent(args)
if err != nil {
return user.Group{}, err
}
groups, err := user.ParseGroup(reader)
if err != nil {
return user.Group{}, err
}
if len(groups) == 0 {
return user.Group{}, fmt.Errorf("getent failed to find groups entry for %q", strings.Split(args, " ")[1])
}
return groups[0], nil
}
func callGetent(args string) (io.Reader, error) {
entOnce.Do(func() { getentCmd, _ = resolveBinary("getent") })
// if no `getent` command on host, can't do anything else
if getentCmd == "" {
return nil, fmt.Errorf("unable to find getent command")
}
out, err := execCmd(getentCmd, args)
if err != nil {
exitCode, errC := system.GetExitCode(err)
if errC != nil {
return nil, err
}
switch exitCode {
case 1:
return nil, fmt.Errorf("getent reported invalid parameters/database unknown")
case 2:
terms := strings.Split(args, " ")
return nil, fmt.Errorf("getent unable to find entry %q in %s database", terms[1], terms[0])
case 3:
return nil, fmt.Errorf("getent database doesn't support enumeration")
default:
return nil, err
}
}
return bytes.NewReader(out), nil
}
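
A hedged usage sketch of the files-then-getent lookups; it assumes a host where the nobody user and gid 0 exist and will simply return an error otherwise.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Tries the local passwd files first, then falls back to
	// `getent passwd nobody`, so NIS/LDAP-backed users are found too.
	usr, err := idtools.LookupUser("nobody")
	if err != nil {
		panic(err)
	}
	fmt.Printf("uid=%d gid=%d home=%s\n", usr.Uid, usr.Gid, usr.Home)

	grp, err := idtools.LookupGID(0)
	if err != nil {
		panic(err)
	}
	fmt.Println("gid 0 is group", grp.Name)
}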


@@ -0,0 +1,25 @@
// +build windows

package idtools
import (
"os"
"github.com/docker/docker/pkg/system"
)
// Platforms such as Windows do not support the UID/GID concept, so mkdirAs
// is just a wrapper around system.MkdirAll.
func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error {
if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) {
return err
}
return nil
}
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
// if that uid, gid pair has access (execute bit) to the directory
// Windows does not require/support this function, so always return true
func CanAccess(path string, uid, gid int) bool {
return true
}


@@ -0,0 +1,164 @@
package idtools
import (
"fmt"
"regexp"
"sort"
"strconv"
"strings"
"sync"
)
// add a user and/or group to Linux /etc/passwd, /etc/group using standard
// Linux distribution commands:
// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group <username>
// useradd -r -s /bin/false <username>
var (
once sync.Once
userCommand string
cmdTemplates = map[string]string{
"adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s",
"useradd": "-r -s /bin/false %s",
"usermod": "-%s %d-%d %s",
}
idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`)
// default length for a UID/GID subordinate range
defaultRangeLen = 65536
defaultRangeStart = 100000
userMod = "usermod"
)
// AddNamespaceRangesUser takes a username and uses the standard system
// utility to create a system user/group pair used to hold the
// /etc/sub{uid,gid} ranges which will be used for user namespace
// mapping ranges in containers.
func AddNamespaceRangesUser(name string) (int, int, error) {
if err := addUser(name); err != nil {
return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err)
}
// Query the system for the created uid and gid pair
out, err := execCmd("id", name)
if err != nil {
return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err)
}
matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out)))
if len(matches) != 3 {
return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out))
}
uid, err := strconv.Atoi(matches[1])
if err != nil {
return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err)
}
gid, err := strconv.Atoi(matches[2])
if err != nil {
return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err)
}
// Now we need to create the subuid/subgid ranges for our new user/group (system users
// do not get auto-created ranges in subuid/subgid)
if err := createSubordinateRanges(name); err != nil {
return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err)
}
return uid, gid, nil
}
func addUser(userName string) error {
once.Do(func() {
// set up which commands are used for adding users/groups dependent on distro
if _, err := resolveBinary("adduser"); err == nil {
userCommand = "adduser"
} else if _, err := resolveBinary("useradd"); err == nil {
userCommand = "useradd"
}
})
if userCommand == "" {
return fmt.Errorf("Cannot add user; no useradd/adduser binary found")
}
args := fmt.Sprintf(cmdTemplates[userCommand], userName)
out, err := execCmd(userCommand, args)
if err != nil {
return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out))
}
return nil
}
func createSubordinateRanges(name string) error {
// first, we should verify that ranges weren't automatically created
// by the distro tooling
ranges, err := parseSubuid(name)
if err != nil {
return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err)
}
if len(ranges) == 0 {
// no UID ranges; let's create one
startID, err := findNextUIDRange()
if err != nil {
return fmt.Errorf("Can't find available subuid range: %v", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name))
if err != nil {
return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err)
}
}
ranges, err = parseSubgid(name)
if err != nil {
return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err)
}
if len(ranges) == 0 {
// no GID ranges; let's create one
startID, err := findNextGIDRange()
if err != nil {
return fmt.Errorf("Can't find available subgid range: %v", err)
}
out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name))
if err != nil {
return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err)
}
}
return nil
}
func findNextUIDRange() (int, error) {
ranges, err := parseSubuid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextGIDRange() (int, error) {
ranges, err := parseSubgid("ALL")
if err != nil {
return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err)
}
sort.Sort(ranges)
return findNextRangeStart(ranges)
}
func findNextRangeStart(rangeList ranges) (int, error) {
startID := defaultRangeStart
for _, arange := range rangeList {
if wouldOverlap(arange, startID) {
startID = arange.Start + arange.Length
}
}
return startID, nil
}
func wouldOverlap(arange subIDRange, ID int) bool {
low := ID
high := ID + defaultRangeLen
if (low >= arange.Start && low <= arange.Start+arange.Length) ||
(high <= arange.Start+arange.Length && high >= arange.Start) {
return true
}
return false
}
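
An in-package sketch (a hypothetical test) of the allocation logic above: starting at defaultRangeStart (100000), findNextRangeStart skips past every existing range it would overlap.

package idtools

import "testing"

func TestFindNextRangeStart(t *testing.T) {
	existing := ranges{
		{Start: 100000, Length: 65536}, // the default slot is taken...
		{Start: 165536, Length: 65536}, // ...and so is the one after it
	}
	start, err := findNextRangeStart(existing)
	if err != nil {
		t.Fatal(err)
	}
	// The next free slot begins where the last occupied range ends.
	if want := 231072; start != want {
		t.Fatalf("got %d, want %d", start, want)
	}
}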


@@ -0,0 +1,12 @@
// +build !linux

package idtools
import "fmt"
// AddNamespaceRangesUser is a stub for platforms other than Linux; adding
// users or groups for subordinate ID ranges is not supported here, so it
// always returns an error.
func AddNamespaceRangesUser(name string) (int, int, error) {
return -1, -1, fmt.Errorf("No support for adding users or groups on this OS")
}


@@ -0,0 +1,32 @@
// +build !windows

package idtools
import (
"fmt"
"os/exec"
"path/filepath"
"strings"
)
func resolveBinary(binname string) (string, error) {
binaryPath, err := exec.LookPath(binname)
if err != nil {
return "", err
}
resolvedPath, err := filepath.EvalSymlinks(binaryPath)
if err != nil {
return "", err
}
//only return no error if the final resolved binary basename
//matches what was searched for
if filepath.Base(resolvedPath) == binname {
return resolvedPath, nil
}
return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath)
}
func execCmd(cmd, args string) ([]byte, error) {
execCmd := exec.Command(cmd, strings.Split(args, " ")...)
return execCmd.CombinedOutput()
}

51
vendor/github.com/docker/docker/pkg/ioutils/buffer.go generated vendored Normal file

@@ -0,0 +1,51 @@
package ioutils
import (
"errors"
"io"
)
var errBufferFull = errors.New("buffer is full")
type fixedBuffer struct {
buf []byte
pos int
lastRead int
}
func (b *fixedBuffer) Write(p []byte) (int, error) {
n := copy(b.buf[b.pos:cap(b.buf)], p)
b.pos += n
if n < len(p) {
if b.pos == cap(b.buf) {
return n, errBufferFull
}
return n, io.ErrShortWrite
}
return n, nil
}
func (b *fixedBuffer) Read(p []byte) (int, error) {
n := copy(p, b.buf[b.lastRead:b.pos])
b.lastRead += n
return n, nil
}
func (b *fixedBuffer) Len() int {
return b.pos - b.lastRead
}
func (b *fixedBuffer) Cap() int {
return cap(b.buf)
}
func (b *fixedBuffer) Reset() {
b.pos = 0
b.lastRead = 0
b.buf = b.buf[:0]
}
func (b *fixedBuffer) String() string {
return string(b.buf[b.lastRead:b.pos])
}
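
An in-package sketch (a hypothetical test) of fixedBuffer's contract: writes fill the slice up to its fixed capacity, a full buffer reports errBufferFull alongside a short count, and reads drain what was written.

package ioutils

import "testing"

func TestFixedBufferFill(t *testing.T) {
	b := &fixedBuffer{buf: make([]byte, 0, 8)}

	n, err := b.Write([]byte("hello"))
	if n != 5 || err != nil {
		t.Fatalf("write: n=%d err=%v", n, err)
	}
	// Only 3 bytes of room remain, so this write is short and the
	// buffer reports that it is full.
	n, err = b.Write([]byte("world"))
	if n != 3 || err != errBufferFull {
		t.Fatalf("write: n=%d err=%v", n, err)
	}
	out := make([]byte, 16)
	n, _ = b.Read(out)
	if string(out[:n]) != "hellowor" {
		t.Fatalf("read back %q", out[:n])
	}
}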


@@ -0,0 +1,186 @@
package ioutils
import (
"errors"
"io"
"sync"
)
// maxCap is the highest capacity to use in byte slices that buffer data.
const maxCap = 1e6
// minCap is the lowest capacity to use in byte slices that buffer data
const minCap = 64
// blockThreshold is the minimum number of bytes in the buffer which will cause
// a write to BytesPipe to block when allocating a new slice.
const blockThreshold = 1e6
var (
// ErrClosed is returned when Write is called on a closed BytesPipe.
ErrClosed = errors.New("write to closed BytesPipe")
bufPools = make(map[int]*sync.Pool)
bufPoolsLock sync.Mutex
)
// BytesPipe is an io.ReadWriteCloser that works similarly to a pipe (queue).
// All written data may be read at most once. Also, BytesPipe allocates
// and releases new byte slices to adjust to current needs, so the buffer
// won't be overgrown after peak loads.
type BytesPipe struct {
mu sync.Mutex
wait *sync.Cond
buf []*fixedBuffer
bufLen int
closeErr error // error to return from next Read. set to nil if not closed.
}
// NewBytesPipe creates a new BytesPipe, initialized with a single
// buffer of capacity minCap (64 bytes).
func NewBytesPipe() *BytesPipe {
bp := &BytesPipe{}
bp.buf = append(bp.buf, getBuffer(minCap))
bp.wait = sync.NewCond(&bp.mu)
return bp
}
// Write writes p to BytesPipe.
// It can allocate new []byte slices in a process of writing.
func (bp *BytesPipe) Write(p []byte) (int, error) {
bp.mu.Lock()
written := 0
loop0:
for {
if bp.closeErr != nil {
bp.mu.Unlock()
return written, ErrClosed
}
if len(bp.buf) == 0 {
bp.buf = append(bp.buf, getBuffer(minCap))
}
// get the last buffer
b := bp.buf[len(bp.buf)-1]
n, err := b.Write(p)
written += n
bp.bufLen += n
// errBufferFull is an error we expect to get if the buffer is full
if err != nil && err != errBufferFull {
bp.wait.Broadcast()
bp.mu.Unlock()
return written, err
}
// if there was enough room to write all then break
if len(p) == n {
break
}
// more data: write to the next slice
p = p[n:]
// make sure the buffer doesn't grow too big from this write
for bp.bufLen >= blockThreshold {
bp.wait.Wait()
if bp.closeErr != nil {
continue loop0
}
}
// add new byte slice to the buffers slice and continue writing
nextCap := b.Cap() * 2
if nextCap > maxCap {
nextCap = maxCap
}
bp.buf = append(bp.buf, getBuffer(nextCap))
}
bp.wait.Broadcast()
bp.mu.Unlock()
return written, nil
}
// CloseWithError causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) CloseWithError(err error) error {
bp.mu.Lock()
if err != nil {
bp.closeErr = err
} else {
bp.closeErr = io.EOF
}
bp.wait.Broadcast()
bp.mu.Unlock()
return nil
}
// Close causes further reads from a BytesPipe to return immediately.
func (bp *BytesPipe) Close() error {
return bp.CloseWithError(nil)
}
// Read reads bytes from BytesPipe.
// Data could be read only once.
func (bp *BytesPipe) Read(p []byte) (n int, err error) {
bp.mu.Lock()
if bp.bufLen == 0 {
if bp.closeErr != nil {
bp.mu.Unlock()
return 0, bp.closeErr
}
bp.wait.Wait()
if bp.bufLen == 0 && bp.closeErr != nil {
err := bp.closeErr
bp.mu.Unlock()
return 0, err
}
}
for bp.bufLen > 0 {
b := bp.buf[0]
read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error
n += read
bp.bufLen -= read
if b.Len() == 0 {
// it's empty so return it to the pool and move to the next one
returnBuffer(b)
bp.buf[0] = nil
bp.buf = bp.buf[1:]
}
if len(p) == read {
break
}
p = p[read:]
}
bp.wait.Broadcast()
bp.mu.Unlock()
return
}
func returnBuffer(b *fixedBuffer) {
b.Reset()
bufPoolsLock.Lock()
pool := bufPools[b.Cap()]
bufPoolsLock.Unlock()
if pool != nil {
pool.Put(b)
}
}
func getBuffer(size int) *fixedBuffer {
bufPoolsLock.Lock()
pool, ok := bufPools[size]
if !ok {
pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }}
bufPools[size] = pool
}
bufPoolsLock.Unlock()
return pool.Get().(*fixedBuffer)
}
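
A usage sketch for BytesPipe: a writer goroutine produces a few chunks and closes the pipe, and the reader drains it with io.Copy until the stored io.EOF surfaces.

package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	go func() {
		for i := 0; i < 3; i++ {
			fmt.Fprintf(bp, "chunk %d\n", i)
		}
		bp.Close() // reads return io.EOF once the pipe is drained
	}()

	var out bytes.Buffer
	if _, err := io.Copy(&out, bp); err != nil {
		panic(err)
	}
	fmt.Print(out.String())
}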

Some files were not shown because too many files have changed in this diff.