Bump deps
2  vendor/cloud.google.com/go/storage/copy.go  (generated, vendored)
@@ -78,7 +78,7 @@ func (c *Copier) Run(ctx context.Context) (*ObjectAttrs, error) {
return nil, err
}
if c.ProgressFunc != nil {
c.ProgressFunc(res.TotalBytesRewritten, res.ObjectSize)
c.ProgressFunc(uint64(res.TotalBytesRewritten), uint64(res.ObjectSize))
}
if res.Done { // Finished successfully.
return newObject(res.Resource), nil
27  vendor/cloud.google.com/go/storage/writer.go  (generated, vendored)
@@ -49,6 +49,16 @@ type Writer struct {
// must be done before the first Write call.
ChunkSize int

// ProgressFunc can be used to monitor the progress of a large write.
// operation. If ProgressFunc is not nil and writing requires multiple
// calls to the underlying service (see
// https://cloud.google.com/storage/docs/json_api/v1/how-tos/resumable-upload),
// then ProgressFunc will be invoked after each call with the number of bytes of
// content copied so far.
//
// ProgressFunc should return quickly without blocking.
ProgressFunc func(int64)

ctx context.Context
o *ObjectHandle

@@ -98,6 +108,9 @@ func (w *Writer) open() error {
Media(pr, mediaOpts...).
Projection("full").
Context(w.ctx)
if w.ProgressFunc != nil {
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.err = err
pr.CloseWithError(w.err)
@@ -107,7 +120,14 @@ func (w *Writer) open() error {
err := applyConds("NewWriter", w.o.gen, w.o.conds, call)
if err == nil {
setClientHeader(call.Header())
resp, err = call.Do()
// We will only retry here if the initial POST, which obtains a URI for
// the resumable upload, fails with a retryable error. The upload itself
// has its own retry logic.
err = runWithRetry(w.ctx, func() error {
var err2 error
resp, err2 = call.Do()
return err2
})
}
if err != nil {
w.err = err
@@ -120,6 +140,11 @@ func (w *Writer) open() error {
}

// Write appends to w. It implements the io.Writer interface.
//
// Since writes happen asynchronously, Write may return a nil
// error even though the write failed (or will fail). Always
// use the error returned from Writer.Close to determine if
// the upload was successful.
func (w *Writer) Write(p []byte) (n int, err error) {
if w.err != nil {
return 0, w.err
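The hunk above adds a ProgressFunc callback to storage.Writer. A minimal usage sketch follows; it is not part of the diff, and the bucket name, object name, chunk size, and file path are illustrative assumptions.

package main

import (
	"context"
	"io"
	"log"
	"os"

	"cloud.google.com/go/storage"
)

func uploadWithProgress(ctx context.Context, client *storage.Client, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	// "my-bucket" and "backup.tar" are placeholders, not taken from the commit.
	w := client.Bucket("my-bucket").Object("backup.tar").NewWriter(ctx)
	w.ChunkSize = 8 << 20 // forces a resumable upload in 8 MiB chunks
	w.ProgressFunc = func(written int64) {
		log.Printf("uploaded %d bytes so far", written)
	}
	if _, err := io.Copy(w, f); err != nil {
		return err
	}
	// Close reports whether the upload actually succeeded.
	return w.Close()
}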
22  vendor/github.com/Jeffail/gabs/gabs.go  (generated, vendored)
@@ -72,6 +72,9 @@ type Container struct {

// Data - Return the contained data as an interface{}.
func (g *Container) Data() interface{} {
if g == nil {
return nil
}
return g.object
}

@@ -89,25 +92,28 @@ func (g *Container) Path(path string) *Container {
func (g *Container) Search(hierarchy ...string) *Container {
var object interface{}

object = g.object
object = g.Data()
for target := 0; target < len(hierarchy); target++ {
if mmap, ok := object.(map[string]interface{}); ok {
object = mmap[hierarchy[target]]
object, ok = mmap[hierarchy[target]]
if !ok {
return nil
}
} else if marray, ok := object.([]interface{}); ok {
tmpArray := []interface{}{}
for _, val := range marray {
tmpGabs := &Container{val}
res := tmpGabs.Search(hierarchy[target:]...).Data()
res := tmpGabs.Search(hierarchy[target:]...)
if res != nil {
tmpArray = append(tmpArray, res)
tmpArray = append(tmpArray, res.Data())
}
}
if len(tmpArray) == 0 {
return &Container{nil}
return nil
}
return &Container{tmpArray}
} else {
return &Container{nil}
return nil
}
}
return &Container{object}
@@ -120,7 +126,7 @@ func (g *Container) S(hierarchy ...string) *Container {

// Exists - Checks whether a path exists.
func (g *Container) Exists(hierarchy ...string) bool {
return g.Search(hierarchy...).Data() != nil
return g.Search(hierarchy...) != nil
}

// ExistsP - Checks whether a dot notation path exists.
@@ -385,7 +391,7 @@ func (g *Container) ArrayCountP(path string) (int, error) {

// Bytes - Converts the contained object back to a JSON []byte blob.
func (g *Container) Bytes() []byte {
if g.object != nil {
if g.Data() != nil {
if bytes, err := json.Marshal(g.object); err == nil {
return bytes
}
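With this patch, Container.Search returns nil for a missing path and Exists reduces to a nil check. The sketch below is not from the diff; it assumes gabs.ParseJSON from the same package (not shown here), and the JSON document is made up.

package main

import (
	"fmt"

	"github.com/Jeffail/gabs"
)

func main() {
	doc, err := gabs.ParseJSON([]byte(`{"outer":{"inner":10}}`))
	if err != nil {
		panic(err)
	}

	fmt.Println(doc.Exists("outer", "inner"))   // true
	fmt.Println(doc.Exists("outer", "missing")) // false

	// Search now returns nil (not an empty Container) when the path is absent,
	// so a plain nil check is enough before calling Data().
	if hit := doc.Search("outer", "inner"); hit != nil {
		fmt.Println(hit.Data()) // 10 (as float64)
	}
}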
6  vendor/github.com/Sirupsen/logrus/CHANGELOG.md  (generated, vendored)
@@ -1,3 +1,9 @@
# 1.0.0

* Officially changed name to lower-case
* bug: colors on Windows 10 (#541)
* bug: fix race in accessing level (#512)

# 0.11.5

* feature: add writer and writerlevel to entry (#372)
18  vendor/github.com/Sirupsen/logrus/README.md  (generated, vendored)
@@ -1,11 +1,5 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/sirupsen/logrus) [](https://godoc.org/github.com/sirupsen/logrus)

**Seeing weird case-sensitive problems?** See [this
issue](https://github.com/sirupsen/logrus/issues/451#issuecomment-264332021).
This change has been reverted. I apologize for causing this. I greatly
underestimated the impact this would have. Logrus strives for stability and
backwards compatibility and failed to provide that.

Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0). Logrus itself is completely stable and has been used in
@@ -13,6 +7,17 @@ many large deployments. The core API is unlikely to change much but please
version control your Logrus to make sure you aren't fetching latest `master` on
every build.**

**Seeing weird case-sensitive problems?** Unfortunately, the author failed to
realize the consequences of renaming to lower-case. Due to the Go package
environment, this caused issues. Regretfully, there's no turning back now.
Everything using `logrus` will need to use the lower-case:
`github.com/sirupsen/logrus`. Any package that isn't, should be changed.

I am terribly sorry for this inconvenience. Logrus strives hard for backwards
compatibility, and the author failed to realize the cascading consequences of
such a name-change. To fix Glide, see [these
comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).

Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):

@@ -276,6 +281,7 @@ Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/v
| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
19  vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go  (generated, vendored)
@@ -91,6 +91,7 @@ const (
FirehoseServiceID = "firehose" // Firehose.
GameliftServiceID = "gamelift" // Gamelift.
GlacierServiceID = "glacier" // Glacier.
GreengrassServiceID = "greengrass" // Greengrass.
HealthServiceID = "health" // Health.
IamServiceID = "iam" // Iam.
ImportexportServiceID = "importexport" // Importexport.
@@ -263,6 +264,7 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -469,12 +471,16 @@ var awsPartition = partition{

Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -963,6 +969,16 @@ var awsPartition = partition{
"us-west-2": endpoint{},
},
},
"greengrass": service{
IsRegionalized: boxedTrue,
Defaults: endpoint{
Protocols: []string{"https"},
},
Endpoints: endpoints{
"us-east-1": endpoint{},
"us-west-2": endpoint{},
},
},
"health": service{

Endpoints: endpoints{
@@ -1006,6 +1022,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
},
},
@@ -1085,6 +1102,7 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
@@ -1534,6 +1552,7 @@ var awsPartition = partition{

Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
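The hunks above register the new greengrass service in the default AWS partition. A hedged sketch of resolving its endpoint follows; the resolver calls are assumed from the aws/endpoints package API rather than shown in this diff, and the region choice is illustrative.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/endpoints"
)

func main() {
	resolver := endpoints.DefaultResolver()
	ep, err := resolver.EndpointFor(endpoints.GreengrassServiceID, "us-east-1")
	if err != nil {
		panic(err)
	}
	// Expected to be an https URL, since the service defaults to Protocols: ["https"].
	fmt.Println(ep.URL)
}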
2  vendor/github.com/aws/aws-sdk-go/aws/request/validation.go  (generated, vendored)
@@ -220,7 +220,7 @@ type ErrParamMinLen struct {
func NewErrParamMinLen(field string, min int) *ErrParamMinLen {
return &ErrParamMinLen{
errInvalidParam: errInvalidParam{
code: ParamMinValueErrCode,
code: ParamMinLenErrCode,
field: field,
msg: fmt.Sprintf("minimum field size of %v", min),
},
2  vendor/github.com/aws/aws-sdk-go/aws/signer/v4/v4.go  (generated, vendored)
@@ -402,7 +402,7 @@ var SignRequestHandler = request.NamedHandler{
}

// SignSDKRequest signs an AWS request with the V4 signature. This
// request handler is bested used only with the SDK's built in service client's
// request handler should only be used with the SDK's built in service client's
// API operation requests.
//
// This function should not be used on its on its own, but in conjunction with
2  vendor/github.com/aws/aws-sdk-go/aws/version.go  (generated, vendored)
@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"

// SDKVersion is the version of this SDK
const SDKVersion = "1.8.34"
const SDKVersion = "1.8.43"
2  vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go  (generated, vendored)
@@ -16,7 +16,7 @@ import (
//    Value int
//  }
//
//  type (u *exampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
//  func (u *exampleUnmarshaler) UnmarshalDynamoDBAttributeValue(av *dynamodb.AttributeValue) error {
//    if av.N == nil {
//      return nil
//    }
508  vendor/github.com/aws/aws-sdk-go/service/ec2/api.go  (generated, vendored)
@@ -2367,7 +2367,8 @@ func (c *EC2) CopyImageRequest(input *CopyImageInput) (req *request.Request, out
// region. You specify the destination region by using its endpoint when making
// the request.
//
// For more information, see Copying AMIs (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html)
// For more information about the prerequisites and limits when copying an AMI,
// see Copying an AMI (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/CopyingAMIs.html)
// in the Amazon Elastic Compute Cloud User Guide.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -7978,6 +7979,83 @@ func (c *EC2) DescribeFlowLogsWithContext(ctx aws.Context, input *DescribeFlowLo
return out, req.Send()
}

const opDescribeFpgaImages = "DescribeFpgaImages"

// DescribeFpgaImagesRequest generates a "aws/request.Request" representing the
// client's request for the DescribeFpgaImages operation. The "output" return
// value can be used to capture response data after the request's "Send" method
// is called.
//
// See DescribeFpgaImages for usage and error information.
//
// Creating a request object using this method should be used when you want to inject
// custom logic into the request's lifecycle using a custom handler, or if you want to
// access properties on the request object before or after sending the request. If
// you just want the service response, call the DescribeFpgaImages method directly
// instead.
//
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
//
//    // Example sending a request using the DescribeFpgaImagesRequest method.
//    req, resp := client.DescribeFpgaImagesRequest(params)
//
//    err := req.Send()
//    if err == nil { // resp is now filled
//        fmt.Println(resp)
//    }
//
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImages
func (c *EC2) DescribeFpgaImagesRequest(input *DescribeFpgaImagesInput) (req *request.Request, output *DescribeFpgaImagesOutput) {
op := &request.Operation{
Name: opDescribeFpgaImages,
HTTPMethod: "POST",
HTTPPath: "/",
}

if input == nil {
input = &DescribeFpgaImagesInput{}
}

output = &DescribeFpgaImagesOutput{}
req = c.newRequest(op, input, output)
return
}

// DescribeFpgaImages API operation for Amazon Elastic Compute Cloud.
//
// Describes one or more available Amazon FPGA Images (AFIs). These include
// public AFIs, private AFIs that you own, and AFIs owned by other AWS accounts
// for which you have load permissions.
//
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions
// with awserr.Error's Code and Message methods to get detailed information about
// the error.
//
// See the AWS API reference guide for Amazon Elastic Compute Cloud's
// API operation DescribeFpgaImages for usage and error information.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImages
func (c *EC2) DescribeFpgaImages(input *DescribeFpgaImagesInput) (*DescribeFpgaImagesOutput, error) {
req, out := c.DescribeFpgaImagesRequest(input)
return out, req.Send()
}

// DescribeFpgaImagesWithContext is the same as DescribeFpgaImages with the addition of
// the ability to pass a context and additional request options.
//
// See DescribeFpgaImages for details on how to use this API operation.
//
// The context must be non-nil and will be used for request cancellation. If
// the context is nil a panic will occur. In the future the SDK may create
// sub-contexts for http.Requests. See https://golang.org/pkg/context/
// for more information on using Contexts.
func (c *EC2) DescribeFpgaImagesWithContext(ctx aws.Context, input *DescribeFpgaImagesInput, opts ...request.Option) (*DescribeFpgaImagesOutput, error) {
req, out := c.DescribeFpgaImagesRequest(input)
req.SetContext(ctx)
req.ApplyOptions(opts...)
return out, req.Send()
}

const opDescribeHostReservationOfferings = "DescribeHostReservationOfferings"

// DescribeHostReservationOfferingsRequest generates a "aws/request.Request" representing the
@@ -29313,6 +29391,164 @@ func (s *DescribeFlowLogsOutput) SetNextToken(v string) *DescribeFlowLogsOutput
return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesRequest
type DescribeFpgaImagesInput struct {
_ struct{} `type:"structure"`

// Checks whether you have the required permissions for the action, without
// actually making the request, and provides an error response. If you have
// the required permissions, the error response is DryRunOperation. Otherwise,
// it is UnauthorizedOperation.
DryRun *bool `type:"boolean"`

// One or more filters.
//
//    * create-time - The creation time of the AFI.
//
//    * fpga-image-id - The FPGA image identifier (AFI ID).
//
//    * fpga-image-global-id - The global FPGA image identifier (AGFI ID).
//
//    * name - The name of the AFI.
//
//    * owner-id - The AWS account ID of the AFI owner.
//
//    * product-code - The product code.
//
//    * shell-version - The version of the AWS Shell that was used to create
//    the bitstream.
//
//    * state - The state of the AFI (pending | failed | available | unavailable).
//
//    * tag:key=value - The key/value combination of a tag assigned to the resource.
//    Specify the key of the tag in the filter name and the value of the tag
//    in the filter value. For example, for the tag Purpose=X, specify tag:Purpose
//    for the filter name and X for the filter value.
//
//    * tag-key - The key of a tag assigned to the resource. This filter is
//    independent of the tag-value filter. For example, if you use both the
//    filter "tag-key=Purpose" and the filter "tag-value=X", you get any resources
//    assigned both the tag key Purpose (regardless of what the tag's value
//    is), and the tag value X (regardless of what the tag's key is). If you
//    want to list only resources where Purpose is X, see the tag:key=value
//    filter.
//
//    * tag-value - The value of a tag assigned to the resource. This filter
//    is independent of the tag-key filter.
//
//    * update-time - The time of the most recent update.
Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"`

// One or more AFI IDs.
FpgaImageIds []*string `locationName:"FpgaImageId" locationNameList:"item" type:"list"`

// The maximum number of results to return in a single call.
MaxResults *int64 `min:"5" type:"integer"`

// The token to retrieve the next page of results.
NextToken *string `min:"1" type:"string"`

// Filters the AFI by owner. Specify an AWS account ID, self (owner is the sender
// of the request), or an AWS owner alias (valid values are amazon | aws-marketplace).
Owners []*string `locationName:"Owner" locationNameList:"Owner" type:"list"`
}

// String returns the string representation
func (s DescribeFpgaImagesInput) String() string {
return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeFpgaImagesInput) GoString() string {
return s.String()
}

// Validate inspects the fields of the type to determine if they are valid.
func (s *DescribeFpgaImagesInput) Validate() error {
invalidParams := request.ErrInvalidParams{Context: "DescribeFpgaImagesInput"}
if s.MaxResults != nil && *s.MaxResults < 5 {
invalidParams.Add(request.NewErrParamMinValue("MaxResults", 5))
}
if s.NextToken != nil && len(*s.NextToken) < 1 {
invalidParams.Add(request.NewErrParamMinLen("NextToken", 1))
}

if invalidParams.Len() > 0 {
return invalidParams
}
return nil
}

// SetDryRun sets the DryRun field's value.
func (s *DescribeFpgaImagesInput) SetDryRun(v bool) *DescribeFpgaImagesInput {
s.DryRun = &v
return s
}

// SetFilters sets the Filters field's value.
func (s *DescribeFpgaImagesInput) SetFilters(v []*Filter) *DescribeFpgaImagesInput {
s.Filters = v
return s
}

// SetFpgaImageIds sets the FpgaImageIds field's value.
func (s *DescribeFpgaImagesInput) SetFpgaImageIds(v []*string) *DescribeFpgaImagesInput {
s.FpgaImageIds = v
return s
}

// SetMaxResults sets the MaxResults field's value.
func (s *DescribeFpgaImagesInput) SetMaxResults(v int64) *DescribeFpgaImagesInput {
s.MaxResults = &v
return s
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeFpgaImagesInput) SetNextToken(v string) *DescribeFpgaImagesInput {
s.NextToken = &v
return s
}

// SetOwners sets the Owners field's value.
func (s *DescribeFpgaImagesInput) SetOwners(v []*string) *DescribeFpgaImagesInput {
s.Owners = v
return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeFpgaImagesResult
type DescribeFpgaImagesOutput struct {
_ struct{} `type:"structure"`

// Information about one or more FPGA images.
FpgaImages []*FpgaImage `locationName:"fpgaImageSet" locationNameList:"item" type:"list"`

// The token to use to retrieve the next page of results. This value is null
// when there are no more results to return.
NextToken *string `locationName:"nextToken" min:"1" type:"string"`
}

// String returns the string representation
func (s DescribeFpgaImagesOutput) String() string {
return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s DescribeFpgaImagesOutput) GoString() string {
return s.String()
}

// SetFpgaImages sets the FpgaImages field's value.
func (s *DescribeFpgaImagesOutput) SetFpgaImages(v []*FpgaImage) *DescribeFpgaImagesOutput {
s.FpgaImages = v
return s
}

// SetNextToken sets the NextToken field's value.
func (s *DescribeFpgaImagesOutput) SetNextToken(v string) *DescribeFpgaImagesOutput {
s.NextToken = &v
return s
}

// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/DescribeHostReservationOfferingsRequest
type DescribeHostReservationOfferingsInput struct {
_ struct{} `type:"structure"`
@@ -38390,6 +38626,182 @@ func (s *FlowLog) SetTrafficType(v string) *FlowLog {
return s
}

// Describes an Amazon FPGA image (AFI).
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImage
type FpgaImage struct {
_ struct{} `type:"structure"`

// The date and time the AFI was created.
CreateTime *time.Time `locationName:"createTime" type:"timestamp" timestampFormat:"iso8601"`

// The description of the AFI.
Description *string `locationName:"description" type:"string"`

// The global FPGA image identifier (AGFI ID).
FpgaImageGlobalId *string `locationName:"fpgaImageGlobalId" type:"string"`

// The FPGA image identifier (AFI ID).
FpgaImageId *string `locationName:"fpgaImageId" type:"string"`

// The name of the AFI.
Name *string `locationName:"name" type:"string"`

// The alias of the AFI owner. Possible values include self, amazon, and aws-marketplace.
OwnerAlias *string `locationName:"ownerAlias" type:"string"`

// The AWS account ID of the AFI owner.
OwnerId *string `locationName:"ownerId" type:"string"`

// Information about the PCI bus.
PciId *PciId `locationName:"pciId" type:"structure"`

// The product codes for the AFI.
ProductCodes []*ProductCode `locationName:"productCodes" locationNameList:"item" type:"list"`

// The version of the AWS Shell that was used to create the bitstream.
ShellVersion *string `locationName:"shellVersion" type:"string"`

// Information about the state of the AFI.
State *FpgaImageState `locationName:"state" type:"structure"`

// Any tags assigned to the AFI.
Tags []*Tag `locationName:"tags" locationNameList:"item" type:"list"`

// The time of the most recent update to the AFI.
UpdateTime *time.Time `locationName:"updateTime" type:"timestamp" timestampFormat:"iso8601"`
}

// String returns the string representation
func (s FpgaImage) String() string {
return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s FpgaImage) GoString() string {
return s.String()
}

// SetCreateTime sets the CreateTime field's value.
func (s *FpgaImage) SetCreateTime(v time.Time) *FpgaImage {
s.CreateTime = &v
return s
}

// SetDescription sets the Description field's value.
func (s *FpgaImage) SetDescription(v string) *FpgaImage {
s.Description = &v
return s
}

// SetFpgaImageGlobalId sets the FpgaImageGlobalId field's value.
func (s *FpgaImage) SetFpgaImageGlobalId(v string) *FpgaImage {
s.FpgaImageGlobalId = &v
return s
}

// SetFpgaImageId sets the FpgaImageId field's value.
func (s *FpgaImage) SetFpgaImageId(v string) *FpgaImage {
s.FpgaImageId = &v
return s
}

// SetName sets the Name field's value.
func (s *FpgaImage) SetName(v string) *FpgaImage {
s.Name = &v
return s
}

// SetOwnerAlias sets the OwnerAlias field's value.
func (s *FpgaImage) SetOwnerAlias(v string) *FpgaImage {
s.OwnerAlias = &v
return s
}

// SetOwnerId sets the OwnerId field's value.
func (s *FpgaImage) SetOwnerId(v string) *FpgaImage {
s.OwnerId = &v
return s
}

// SetPciId sets the PciId field's value.
func (s *FpgaImage) SetPciId(v *PciId) *FpgaImage {
s.PciId = v
return s
}

// SetProductCodes sets the ProductCodes field's value.
func (s *FpgaImage) SetProductCodes(v []*ProductCode) *FpgaImage {
s.ProductCodes = v
return s
}

// SetShellVersion sets the ShellVersion field's value.
func (s *FpgaImage) SetShellVersion(v string) *FpgaImage {
s.ShellVersion = &v
return s
}

// SetState sets the State field's value.
func (s *FpgaImage) SetState(v *FpgaImageState) *FpgaImage {
s.State = v
return s
}

// SetTags sets the Tags field's value.
func (s *FpgaImage) SetTags(v []*Tag) *FpgaImage {
s.Tags = v
return s
}

// SetUpdateTime sets the UpdateTime field's value.
func (s *FpgaImage) SetUpdateTime(v time.Time) *FpgaImage {
s.UpdateTime = &v
return s
}

// Describes the state of the bitstream generation process for an Amazon FPGA
// image (AFI).
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/FpgaImageState
type FpgaImageState struct {
_ struct{} `type:"structure"`

// The state. The following are the possible values:
//
//    * pending - AFI bitstream generation is in progress.
//
//    * available - The AFI is available for use.
//
//    * failed - AFI bitstream generation failed.
//
//    * unavailable - The AFI is no longer available for use.
Code *string `locationName:"code" type:"string" enum:"FpgaImageStateCode"`

// If the state is failed, this is the error message.
Message *string `locationName:"message" type:"string"`
}

// String returns the string representation
func (s FpgaImageState) String() string {
return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s FpgaImageState) GoString() string {
return s.String()
}

// SetCode sets the Code field's value.
func (s *FpgaImageState) SetCode(v string) *FpgaImageState {
s.Code = &v
return s
}

// SetMessage sets the Message field's value.
func (s *FpgaImageState) SetMessage(v string) *FpgaImageState {
s.Message = &v
return s
}

// Contains the parameters for GetConsoleOutput.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/GetConsoleOutputRequest
type GetConsoleOutputInput struct {
@@ -45881,6 +46293,59 @@ func (s *NewDhcpConfiguration) SetValues(v []*string) *NewDhcpConfiguration {
return s
}

// Describes the data that identifies an Amazon FPGA image (AFI) on the PCI
// bus.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PciId
type PciId struct {
_ struct{} `type:"structure"`

// The ID of the device.
DeviceId *string `type:"string"`

// The ID of the subsystem.
SubsystemId *string `type:"string"`

// The ID of the vendor for the subsystem.
SubsystemVendorId *string `type:"string"`

// The ID of the vendor.
VendorId *string `type:"string"`
}

// String returns the string representation
func (s PciId) String() string {
return awsutil.Prettify(s)
}

// GoString returns the string representation
func (s PciId) GoString() string {
return s.String()
}

// SetDeviceId sets the DeviceId field's value.
func (s *PciId) SetDeviceId(v string) *PciId {
s.DeviceId = &v
return s
}

// SetSubsystemId sets the SubsystemId field's value.
func (s *PciId) SetSubsystemId(v string) *PciId {
s.SubsystemId = &v
return s
}

// SetSubsystemVendorId sets the SubsystemVendorId field's value.
func (s *PciId) SetSubsystemVendorId(v string) *PciId {
s.SubsystemVendorId = &v
return s
}

// SetVendorId sets the VendorId field's value.
func (s *PciId) SetVendorId(v string) *PciId {
s.VendorId = &v
return s
}

// Describes the VPC peering connection options.
// Please also see https://docs.aws.amazon.com/goto/WebAPI/ec2-2016-11-15/PeeringConnectionOptions
type PeeringConnectionOptions struct {
@@ -45992,6 +46457,9 @@ type Placement struct {
// is not supported for the ImportInstance command.
HostId *string `locationName:"hostId" type:"string"`

// Reserved for future use.
SpreadDomain *string `locationName:"spreadDomain" type:"string"`

// The tenancy of the instance (if the instance is running in a VPC). An instance
// with a tenancy of dedicated runs on single-tenant hardware. The host tenancy
// is not supported for the ImportInstance command.
@@ -46032,6 +46500,12 @@ func (s *Placement) SetHostId(v string) *Placement {
return s
}

// SetSpreadDomain sets the SpreadDomain field's value.
func (s *Placement) SetSpreadDomain(v string) *Placement {
s.SpreadDomain = &v
return s
}

// SetTenancy sets the Tenancy field's value.
func (s *Placement) SetTenancy(v string) *Placement {
s.Tenancy = &v
@@ -48411,7 +48885,7 @@ type RequestSpotInstancesInput struct {
// Default: Instances are launched and terminated individually
LaunchGroup *string `locationName:"launchGroup" type:"string"`

// Describes the launch specification for an instance.
// The launch specification.
LaunchSpecification *RequestSpotLaunchSpecification `type:"structure"`

// The maximum hourly price (bid) for any Spot instance launched to fulfill
@@ -48600,7 +49074,9 @@ type RequestSpotLaunchSpecification struct {
// The name of the key pair.
KeyName *string `locationName:"keyName" type:"string"`

// Describes the monitoring of an instance.
// Indicates whether basic or detailed monitoring is enabled for the instance.
//
// Default: Disabled
Monitoring *RunInstancesMonitoringEnabled `locationName:"monitoring" type:"structure"`

// One or more network interfaces. If you specify a network interface, you must
@@ -48613,8 +49089,12 @@ type RequestSpotLaunchSpecification struct {
// The ID of the RAM disk.
RamdiskId *string `locationName:"ramdiskId" type:"string"`

// One or more security group IDs.
SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"item" type:"list"`

// One or more security groups. When requesting instances in a VPC, you must
// specify the IDs of the security groups. When requesting instances in EC2-Classic,
// you can specify the names or the IDs of the security groups.
SecurityGroups []*string `locationName:"SecurityGroup" locationNameList:"item" type:"list"`

// The ID of the subnet in which to launch the instance.
@@ -56209,15 +56689,15 @@ func (s *VpcIpv6CidrBlockAssociation) SetIpv6CidrBlockState(v *VpcCidrBlockState
type VpcPeeringConnection struct {
_ struct{} `type:"structure"`

// Information about the accepter VPC. CIDR block information is not returned
// when creating a VPC peering connection, or when describing a VPC peering
// connection that's in the initiating-request or pending-acceptance state.
// Information about the accepter VPC. CIDR block information is only returned
// when describing an active VPC peering connection.
AccepterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"accepterVpcInfo" type:"structure"`

// The time that an unaccepted VPC peering connection will expire.
ExpirationTime *time.Time `locationName:"expirationTime" type:"timestamp" timestampFormat:"iso8601"`

// Information about the requester VPC.
// Information about the requester VPC. CIDR block information is only returned
// when describing an active VPC peering connection.
RequesterVpcInfo *VpcPeeringConnectionVpcInfo `locationName:"requesterVpcInfo" type:"structure"`

// The status of the VPC peering connection.
@@ -57008,6 +57488,20 @@ const (
FlowLogsResourceTypeNetworkInterface = "NetworkInterface"
)

const (
// FpgaImageStateCodePending is a FpgaImageStateCode enum value
FpgaImageStateCodePending = "pending"

// FpgaImageStateCodeFailed is a FpgaImageStateCode enum value
FpgaImageStateCodeFailed = "failed"

// FpgaImageStateCodeAvailable is a FpgaImageStateCode enum value
FpgaImageStateCodeAvailable = "available"

// FpgaImageStateCodeUnavailable is a FpgaImageStateCode enum value
FpgaImageStateCodeUnavailable = "unavailable"
)

const (
// GatewayTypeIpsec1 is a GatewayType enum value
GatewayTypeIpsec1 = "ipsec.1"
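The new DescribeFpgaImages operation and its input/output types added above can be exercised as in the following sketch; it is not part of the commit, and the session setup, region, and owner filter are assumptions.

package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/ec2"
)

func main() {
	// Placeholder region; credentials come from the usual SDK chain.
	sess := session.Must(session.NewSession(aws.NewConfig().WithRegion("us-east-1")))
	svc := ec2.New(sess)

	input := &ec2.DescribeFpgaImagesInput{
		Owners:     []*string{aws.String("self")},
		MaxResults: aws.Int64(5), // minimum allowed by Validate() is 5
	}
	out, err := svc.DescribeFpgaImages(input)
	if err != nil {
		log.Fatal(err)
	}
	for _, afi := range out.FpgaImages {
		fmt.Println(aws.StringValue(afi.FpgaImageId), aws.StringValue(afi.Name))
	}
}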
48  vendor/github.com/aws/aws-sdk-go/service/s3/api.go  (generated, vendored)
@@ -6870,7 +6870,7 @@ type CompleteMultipartUploadInput struct {
// Key is a required field
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure"`
MultipartUpload *CompletedMultipartUpload `locationName:"CompleteMultipartUpload" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

// Confirms that the requester knows that she or he will be charged for the
// request. Bucket owners need not specify this parameter in their requests.
@@ -7704,7 +7704,7 @@ type CreateBucketInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure"`
CreateBucketConfiguration *CreateBucketConfiguration `locationName:"CreateBucketConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

// Allows grantee the read, write, read ACP, and write ACP permissions on the
// bucket.
@@ -9085,7 +9085,7 @@ type DeleteObjectsInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

// Delete is a required field
Delete *Delete `locationName:"Delete" type:"structure" required:"true"`
Delete *Delete `locationName:"Delete" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

// The concatenation of the authentication device's serial number, a space,
// and the value that is displayed on your authentication device.
@@ -11370,7 +11370,7 @@ func (s *GlacierJobParameters) SetTier(v string) *GlacierJobParameters {
type Grant struct {
_ struct{} `type:"structure"`

Grantee *Grantee `type:"structure"`
Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`

// Specifies the permission given to the grantee.
Permission *string `type:"string" enum:"Permission"`
@@ -15133,7 +15133,7 @@ type PutBucketAccelerateConfigurationInput struct {
// Specifies the Accelerate Configuration you want to set for the bucket.
//
// AccelerateConfiguration is a required field
AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true"`
AccelerateConfiguration *AccelerateConfiguration `locationName:"AccelerateConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

// Name of the bucket for which the accelerate configuration is set.
//
@@ -15201,7 +15201,7 @@ type PutBucketAclInput struct {
// The canned ACL to apply to the bucket.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"BucketCannedACL"`

AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -15321,7 +15321,7 @@ type PutBucketAnalyticsConfigurationInput struct {
// The configuration and any analyses for the analytics filter.
//
// AnalyticsConfiguration is a required field
AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true"`
AnalyticsConfiguration *AnalyticsConfiguration `locationName:"AnalyticsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

// The name of the bucket to which an analytics configuration is stored.
//
@@ -15409,7 +15409,7 @@ type PutBucketCorsInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

// CORSConfiguration is a required field
CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true"`
CORSConfiguration *CORSConfiguration `locationName:"CORSConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -15487,7 +15487,7 @@ type PutBucketInventoryConfigurationInput struct {
// Specifies the inventory configuration.
//
// InventoryConfiguration is a required field
InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true"`
InventoryConfiguration *InventoryConfiguration `locationName:"InventoryConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -15564,7 +15564,7 @@ type PutBucketLifecycleConfigurationInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
LifecycleConfiguration *BucketLifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -15629,7 +15629,7 @@ type PutBucketLifecycleInput struct {
// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure"`
LifecycleConfiguration *LifecycleConfiguration `locationName:"LifecycleConfiguration" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -15695,7 +15695,7 @@ type PutBucketLoggingInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

// BucketLoggingStatus is a required field
BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true"`
BucketLoggingStatus *BucketLoggingStatus `locationName:"BucketLoggingStatus" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -15773,7 +15773,7 @@ type PutBucketMetricsConfigurationInput struct {
// Specifies the metrics configuration.
//
// MetricsConfiguration is a required field
MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true"`
MetricsConfiguration *MetricsConfiguration `locationName:"MetricsConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -15854,7 +15854,7 @@ type PutBucketNotificationConfigurationInput struct {
// this element is empty, notifications are turned off on the bucket.
//
// NotificationConfiguration is a required field
NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true"`
NotificationConfiguration *NotificationConfiguration `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -15923,7 +15923,7 @@ type PutBucketNotificationInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

// NotificationConfiguration is a required field
NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true"`
NotificationConfiguration *NotificationConfigurationDeprecated `locationName:"NotificationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -16056,7 +16056,7 @@ type PutBucketReplicationInput struct {
// replication configuration size can be up to 2 MB.
//
// ReplicationConfiguration is a required field
ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true"`
ReplicationConfiguration *ReplicationConfiguration `locationName:"ReplicationConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -16125,7 +16125,7 @@ type PutBucketRequestPaymentInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

// RequestPaymentConfiguration is a required field
RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true"`
RequestPaymentConfiguration *RequestPaymentConfiguration `locationName:"RequestPaymentConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -16194,7 +16194,7 @@ type PutBucketTaggingInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

// Tagging is a required field
Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -16267,7 +16267,7 @@ type PutBucketVersioningInput struct {
MFA *string `location:"header" locationName:"x-amz-mfa" type:"string"`

// VersioningConfiguration is a required field
VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true"`
VersioningConfiguration *VersioningConfiguration `locationName:"VersioningConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -16337,7 +16337,7 @@ type PutBucketWebsiteInput struct {
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`

// WebsiteConfiguration is a required field
WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true"`
WebsiteConfiguration *WebsiteConfiguration `locationName:"WebsiteConfiguration" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`
}

// String returns the string representation
@@ -16405,7 +16405,7 @@ type PutObjectAclInput struct {
// The canned ACL to apply to the object.
ACL *string `location:"header" locationName:"x-amz-acl" type:"string" enum:"ObjectCannedACL"`

AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure"`
AccessControlPolicy *AccessControlPolicy `locationName:"AccessControlPolicy" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

// Bucket is a required field
Bucket *string `location:"uri" locationName:"Bucket" type:"string" required:"true"`
@@ -16951,7 +16951,7 @@ type PutObjectTaggingInput struct {
Key *string `location:"uri" locationName:"Key" min:"1" type:"string" required:"true"`

// Tagging is a required field
Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true"`
Tagging *Tagging `locationName:"Tagging" type:"structure" required:"true" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -17485,7 +17485,7 @@ type RestoreObjectInput struct {
// at http://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectsinRequesterPaysBuckets.html
RequestPayer *string `location:"header" locationName:"x-amz-request-payer" type:"string" enum:"RequestPayer"`

RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure"`
RestoreRequest *RestoreRequest `locationName:"RestoreRequest" type:"structure" xmlURI:"http://s3.amazonaws.com/doc/2006-03-01/"`

VersionId *string `location:"querystring" locationName:"versionId" type:"string"`
}
@@ -18005,7 +18005,7 @@ func (s *Tagging) SetTagSet(v []*Tag) *Tagging {
type TargetGrant struct {
_ struct{} `type:"structure"`

Grantee *Grantee `type:"structure"`
Grantee *Grantee `type:"structure" xmlPrefix:"xsi" xmlURI:"http://www.w3.org/2001/XMLSchema-instance"`

// Logging permissions assigned to the Grantee for the bucket.
Permission *string `type:"string" enum:"BucketLogsPermission"`
2  vendor/github.com/coreos/etcd/client/doc.go  (generated, vendored)
@@ -59,7 +59,7 @@ Use a custom context to set timeouts on your operations:
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()

// set a new key, ignoring it's previous state
// set a new key, ignoring its previous state
_, err := kAPI.Set(ctx, "/ping", "pong", nil)
if err != nil {
if err == context.DeadlineExceeded {
2  vendor/github.com/coreos/etcd/client/members.go  (generated, vendored)
@@ -44,7 +44,7 @@ type Member struct {
PeerURLs []string `json:"peerURLs"`

// ClientURLs represents the HTTP(S) endpoints on which this Member
// serves it's client-facing APIs.
// serves its client-facing APIs.
ClientURLs []string `json:"clientURLs"`
}
2  vendor/github.com/coreos/etcd/clientv3/client.go  (generated, vendored)
@@ -333,7 +333,7 @@ func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientCo
// when the cluster has a leader.
func WithRequireLeader(ctx context.Context) context.Context {
md := metadata.Pairs(rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
return metadata.NewContext(ctx, md)
return metadata.NewOutgoingContext(ctx, md)
}

func newClient(cfg *Config) (*Client, error) {
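The WithRequireLeader helper patched above now attaches its metadata via NewOutgoingContext. A usage sketch follows; it is not from the diff, and the endpoint address and key are placeholders.

package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"127.0.0.1:2379"}, // placeholder endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// Requests made with this context fail fast when the cluster has no leader.
	ctx, cancel := context.WithTimeout(clientv3.WithRequireLeader(context.Background()), 3*time.Second)
	defer cancel()

	if _, err := cli.Get(ctx, "foo"); err != nil {
		log.Println("get failed:", err)
	}
}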
10  vendor/github.com/coreos/etcd/clientv3/concurrency/mutex.go  (generated, vendored)
@@ -49,7 +49,9 @@ func (m *Mutex) Lock(ctx context.Context) error {
put := v3.OpPut(m.myKey, "", v3.WithLease(s.Lease()))
// reuse key in case this session already holds the lock
get := v3.OpGet(m.myKey)
resp, err := client.Txn(ctx).If(cmp).Then(put).Else(get).Commit()
// fetch current holder to complete uncontended path with only one RPC
getOwner := v3.OpGet(m.pfx, v3.WithFirstCreate()...)
resp, err := client.Txn(ctx).If(cmp).Then(put, getOwner).Else(get, getOwner).Commit()
if err != nil {
return err
}
@@ -57,6 +59,12 @@ func (m *Mutex) Lock(ctx context.Context) error {
if !resp.Succeeded {
m.myRev = resp.Responses[0].GetResponseRange().Kvs[0].CreateRevision
}
// if no key on prefix / the minimum rev is key, already hold the lock
ownerKey := resp.Responses[1].GetResponseRange().Kvs
if len(ownerKey) == 0 || ownerKey[0].CreateRevision == m.myRev {
m.hdr = resp.Header
return nil
}

// wait for deletion revisions prior to myKey
hdr, werr := waitDeletes(ctx, client, m.pfx, m.myRev-1)
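The Lock change above piggybacks an owner lookup on the same transaction, so an uncontended acquire completes in a single RPC. A hedged sketch of taking such a lock through the concurrency package; the endpoint and lock prefix are placeholders.

package main

import (
	"context"
	"log"

	"github.com/coreos/etcd/clientv3"
	"github.com/coreos/etcd/clientv3/concurrency"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{Endpoints: []string{"127.0.0.1:2379"}})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	sess, err := concurrency.NewSession(cli)
	if err != nil {
		log.Fatal(err)
	}
	defer sess.Close()

	m := concurrency.NewMutex(sess, "/locks/jobs")
	// With the change above, the uncontended acquire needs only one Txn round trip.
	if err := m.Lock(context.Background()); err != nil {
		log.Fatal(err)
	}
	defer m.Unlock(context.Background())

	log.Println("holding lock, doing work")
}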
10  vendor/github.com/coreos/etcd/clientv3/lease.go  (generated, vendored)
@@ -323,7 +323,7 @@ func (l *lessor) closeRequireLeader() {
reqIdxs := 0
// find all required leader channels, close, mark as nil
for i, ctx := range ka.ctxs {
md, ok := metadata.FromContext(ctx)
md, ok := metadata.FromOutgoingContext(ctx)
if !ok {
continue
}
@@ -386,7 +386,7 @@ func (l *lessor) recvKeepAliveLoop() (gerr error) {
close(l.donec)
l.loopErr = gerr
for _, ka := range l.keepAlives {
ka.Close()
ka.close()
}
l.keepAlives = make(map[LeaseID]*keepAlive)
l.mu.Unlock()
@@ -467,7 +467,7 @@ func (l *lessor) recvKeepAlive(resp *pb.LeaseKeepAliveResponse) {
if karesp.TTL <= 0 {
// lease expired; close all keep alive channels
delete(l.keepAlives, karesp.ID)
ka.Close()
ka.close()
return
}

@@ -497,7 +497,7 @@ func (l *lessor) deadlineLoop() {
for id, ka := range l.keepAlives {
if ka.deadline.Before(now) {
// waited too long for response; lease may be expired
ka.Close()
ka.close()
delete(l.keepAlives, id)
}
}
@@ -539,7 +539,7 @@ func (l *lessor) sendKeepAliveLoop(stream pb.Lease_LeaseKeepAliveClient) {
}
}

func (ka *keepAlive) Close() {
func (ka *keepAlive) close() {
close(ka.donec)
for _, ch := range ka.chs {
close(ch)
11  vendor/github.com/coreos/etcd/clientv3/watch.go  (generated, vendored)
@@ -40,10 +40,9 @@ type WatchChan <-chan WatchResponse

type Watcher interface {
// Watch watches on a key or prefix. The watched events will be returned
// through the returned channel.
// If the watch is slow or the required rev is compacted, the watch request
// might be canceled from the server-side and the chan will be closed.
// 'opts' can be: 'WithRev' and/or 'WithPrefix'.
// through the returned channel. If revisions waiting to be sent over the
// watch are compacted, then the watch will be canceled by the server, the
// client will post a compacted error watch response, and the channel will close.
Watch(ctx context.Context, key string, opts ...OpOption) WatchChan

// Close closes the watcher and cancels all watch requests.
@@ -317,14 +316,14 @@ func (w *watcher) Close() (err error) {
w.streams = nil
w.mu.Unlock()
for _, wgs := range streams {
if werr := wgs.Close(); werr != nil {
if werr := wgs.close(); werr != nil {
err = werr
}
}
return err
}

func (w *watchGrpcStream) Close() (err error) {
func (w *watchGrpcStream) close() (err error) {
w.cancel()
<-w.donec
select {
778
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
generated
vendored
778
vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
generated
vendored
File diff suppressed because it is too large
21
vendor/github.com/coreos/go-semver/semver/semver.go
generated
vendored
21
vendor/github.com/coreos/go-semver/semver/semver.go
generated
vendored
@@ -19,6 +19,7 @@ import (
"bytes"
"errors"
"fmt"
"regexp"
"strconv"
"strings"
)
@@ -76,6 +77,14 @@ func (v *Version) Set(version string) error {
return fmt.Errorf("%s is not in dotted-tri format", version)
}

if err := validateIdentifier(string(preRelease)); err != nil {
return fmt.Errorf("failed to validate pre-release: %v", err)
}

if err := validateIdentifier(metadata); err != nil {
return fmt.Errorf("failed to validate metadata: %v", err)
}

parsed := make([]int64, 3, 3)

for i, v := range dotParts[:3] {
@@ -273,3 +282,15 @@ func (v *Version) BumpPatch() {
v.PreRelease = PreRelease("")
v.Metadata = ""
}

// validateIdentifier makes sure the provided identifier satisfies semver spec
func validateIdentifier(id string) error {
if id != "" && !reIdentifier.MatchString(id) {
return fmt.Errorf("%s is not a valid semver identifier", id)
}
return nil
}

// reIdentifier is a regular expression used to check that pre-release and metadata
// identifiers satisfy the spec requirements
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)

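A brief standalone sketch (not part of the diff) of what the new identifier validation accepts and rejects, using the same regular expression shown above; the version identifiers are made up for illustration.

package main

import (
	"fmt"
	"regexp"
)

// Same pattern the vendored package now applies to pre-release and metadata
// identifiers: dot-separated groups of [0-9A-Za-z-].
var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)

func main() {
	for _, id := range []string{"alpha.1", "rc-2", "bad_char!"} {
		fmt.Printf("%-12q valid=%v\n", id, reIdentifier.MatchString(id))
	}
}
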
21
vendor/github.com/denisenkom/go-mssqldb/buf.go
generated
vendored
21
vendor/github.com/denisenkom/go-mssqldb/buf.go
generated
vendored
@@ -1,9 +1,11 @@
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
)
|
||||
|
||||
type packetType uint8
|
||||
@@ -51,6 +53,19 @@ func newTdsBuffer(bufsize uint16, transport io.ReadWriteCloser) *tdsBuffer {
|
||||
return w
|
||||
}
|
||||
|
||||
func checkBadConn(err error) error {
|
||||
if err == io.EOF {
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
|
||||
switch err.(type) {
|
||||
case net.Error:
|
||||
return driver.ErrBadConn
|
||||
default:
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
func (rw *tdsBuffer) ResizeBuffer(packetsizei int) {
|
||||
if len(rw.rbuf) != packetsizei {
|
||||
newbuf := make([]byte, packetsizei)
|
||||
@@ -137,7 +152,7 @@ func (r *tdsBuffer) readNextPacket() error {
|
||||
var err error
|
||||
err = binary.Read(r.transport, binary.BigEndian, &header)
|
||||
if err != nil {
|
||||
return err
|
||||
return checkBadConn(err)
|
||||
}
|
||||
offset := uint16(binary.Size(header))
|
||||
if int(header.Size) > len(r.rbuf) {
|
||||
@@ -148,7 +163,7 @@ func (r *tdsBuffer) readNextPacket() error {
|
||||
}
|
||||
_, err = io.ReadFull(r.transport, r.rbuf[offset:header.Size])
|
||||
if err != nil {
|
||||
return err
|
||||
return checkBadConn(err)
|
||||
}
|
||||
r.rpos = offset
|
||||
r.rsize = header.Size
|
||||
@@ -191,7 +206,7 @@ func (r *tdsBuffer) byte() byte {
|
||||
func (r *tdsBuffer) ReadFull(buf []byte) {
|
||||
_, err := io.ReadFull(r, buf[:])
|
||||
if err != nil {
|
||||
badStreamPanic(err)
|
||||
badStreamPanic(checkBadConn(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
2
vendor/github.com/denisenkom/go-mssqldb/mssql.go
generated
vendored
2
vendor/github.com/denisenkom/go-mssqldb/mssql.go
generated
vendored
@@ -6,13 +6,13 @@ import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"golang.org/x/net/context" // use the "x/net/context" for backwards compatibility.
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
"golang.org/x/net/context" // use the "x/net/context" for backwards compatibility.
|
||||
)
|
||||
|
||||
var driverInstance = &MssqlDriver{processQueryText: true}
|
||||
|
||||
6
vendor/github.com/denisenkom/go-mssqldb/token.go
generated
vendored
6
vendor/github.com/denisenkom/go-mssqldb/token.go
generated
vendored
@@ -8,6 +8,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"database/sql/driver"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
@@ -537,7 +539,7 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct) {
|
||||
return
|
||||
}
|
||||
if packet_type != packReply {
|
||||
badStreamPanicf("invalid response packet type, expected REPLY, actual: %d", packet_type)
|
||||
badStreamPanic(driver.ErrBadConn)
|
||||
}
|
||||
var columns []columnStruct
|
||||
errs := make([]Error, 0, 5)
|
||||
@@ -613,7 +615,7 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct) {
|
||||
sess.log.Println(info.Message)
|
||||
}
|
||||
default:
|
||||
badStreamPanicf("Unknown token type: %d", token)
|
||||
badStreamPanic(driver.ErrBadConn)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
4
vendor/github.com/dgrijalva/jwt-go/README.md
generated
vendored
4
vendor/github.com/dgrijalva/jwt-go/README.md
generated
vendored
@@ -74,7 +74,7 @@ It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is
|
||||
|
||||
Without going too far down the rabbit hole, here's a description of the interaction of these technologies:
|
||||
|
||||
* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
|
||||
* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth.
|
||||
* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token.
|
||||
* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL.
|
||||
|
||||
@@ -82,4 +82,4 @@ Without going too far down the rabbit hole, here's a description of the interact
|
||||
|
||||
Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go).
|
||||
|
||||
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in to documentation.
|
||||
The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.
|
||||
|
||||
6
vendor/github.com/docker/docker/api/types/types.go
generated
vendored
6
vendor/github.com/docker/docker/api/types/types.go
generated
vendored
@@ -468,6 +468,12 @@ type NetworkDisconnect struct {
|
||||
Force bool
|
||||
}
|
||||
|
||||
// NetworkInspectOptions holds parameters to inspect network
|
||||
type NetworkInspectOptions struct {
|
||||
Scope string
|
||||
Verbose bool
|
||||
}
|
||||
|
||||
// Checkpoint represents the details of a checkpoint
|
||||
type Checkpoint struct {
|
||||
Name string // Name is the name of the checkpoint
|
||||
|
||||
24
vendor/github.com/docker/docker/opts/ulimit.go
generated
vendored
24
vendor/github.com/docker/docker/opts/ulimit.go
generated
vendored
@@ -55,3 +55,27 @@ func (o *UlimitOpt) GetList() []*units.Ulimit {
|
||||
func (o *UlimitOpt) Type() string {
|
||||
return "ulimit"
|
||||
}
|
||||
|
||||
// NamedUlimitOpt defines a named map of Ulimits
|
||||
type NamedUlimitOpt struct {
|
||||
name string
|
||||
UlimitOpt
|
||||
}
|
||||
|
||||
var _ NamedOption = &NamedUlimitOpt{}
|
||||
|
||||
// NewNamedUlimitOpt creates a new NamedUlimitOpt
|
||||
func NewNamedUlimitOpt(name string, ref *map[string]*units.Ulimit) *NamedUlimitOpt {
|
||||
if ref == nil {
|
||||
ref = &map[string]*units.Ulimit{}
|
||||
}
|
||||
return &NamedUlimitOpt{
|
||||
name: name,
|
||||
UlimitOpt: *NewUlimitOpt(ref),
|
||||
}
|
||||
}
|
||||
|
||||
// Name returns the option name
|
||||
func (o *NamedUlimitOpt) Name() string {
|
||||
return o.name
|
||||
}
|
||||
|
||||
213
vendor/github.com/docker/docker/pkg/archive/archive.go
generated
vendored
213
vendor/github.com/docker/docker/pkg/archive/archive.go
generated
vendored
@@ -6,7 +6,6 @@ import (
|
||||
"bytes"
|
||||
"compress/bzip2"
|
||||
"compress/gzip"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
@@ -31,10 +30,6 @@ type (
|
||||
Compression int
|
||||
// WhiteoutFormat is the format of whiteouts unpacked
|
||||
WhiteoutFormat int
|
||||
// TarChownOptions wraps the chown options UID and GID.
|
||||
TarChownOptions struct {
|
||||
UID, GID int
|
||||
}
|
||||
|
||||
// TarOptions wraps the tar options.
|
||||
TarOptions struct {
|
||||
@@ -44,7 +39,7 @@ type (
|
||||
NoLchown bool
|
||||
UIDMaps []idtools.IDMap
|
||||
GIDMaps []idtools.IDMap
|
||||
ChownOpts *TarChownOptions
|
||||
ChownOpts *idtools.IDPair
|
||||
IncludeSourceDir bool
|
||||
// WhiteoutFormat is the expected on disk format for whiteout files.
|
||||
// This format will be converted to the standard format on pack
|
||||
@@ -58,33 +53,26 @@ type (
|
||||
RebaseNames map[string]string
|
||||
InUserNS bool
|
||||
}
|
||||
|
||||
// Archiver allows the reuse of most utility functions of this package
|
||||
// with a pluggable Untar function. Also, to facilitate the passing of
|
||||
// specific id mappings for untar, an archiver can be created with maps
|
||||
// which will then be passed to Untar operations
|
||||
Archiver struct {
|
||||
Untar func(io.Reader, string, *TarOptions) error
|
||||
UIDMaps []idtools.IDMap
|
||||
GIDMaps []idtools.IDMap
|
||||
}
|
||||
|
||||
// breakoutError is used to differentiate errors related to breaking out
|
||||
// When testing archive breakout in the unit tests, this error is expected
|
||||
// in order for the test to pass.
|
||||
breakoutError error
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrNotImplemented is the error message of function not implemented.
|
||||
ErrNotImplemented = errors.New("Function not implemented")
|
||||
defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil}
|
||||
)
|
||||
// Archiver allows the reuse of most utility functions of this package
|
||||
// with a pluggable Untar function. Also, to facilitate the passing of
|
||||
// specific id mappings for untar, an archiver can be created with maps
|
||||
// which will then be passed to Untar operations
|
||||
type Archiver struct {
|
||||
Untar func(io.Reader, string, *TarOptions) error
|
||||
IDMappings *idtools.IDMappings
|
||||
}
|
||||
|
||||
const (
|
||||
// HeaderSize is the size in bytes of a tar header
|
||||
HeaderSize = 512
|
||||
)
|
||||
// NewDefaultArchiver returns a new Archiver without any IDMappings
|
||||
func NewDefaultArchiver() *Archiver {
|
||||
return &Archiver{Untar: Untar, IDMappings: &idtools.IDMappings{}}
|
||||
}
|
||||
|
||||
// breakoutError is used to differentiate errors related to breaking out
|
||||
// When testing archive breakout in the unit tests, this error is expected
|
||||
// in order for the test to pass.
|
||||
type breakoutError error
|
||||
|
||||
const (
|
||||
// Uncompressed represents the uncompressed.
|
||||
@@ -105,18 +93,6 @@ const (
|
||||
OverlayWhiteoutFormat
|
||||
)
|
||||
|
||||
// IsArchive checks for the magic bytes of a tar or any supported compression
|
||||
// algorithm.
|
||||
func IsArchive(header []byte) bool {
|
||||
compression := DetectCompression(header)
|
||||
if compression != Uncompressed {
|
||||
return true
|
||||
}
|
||||
r := tar.NewReader(bytes.NewBuffer(header))
|
||||
_, err := r.Next()
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// IsArchivePath checks if the (possibly compressed) file at the given path
|
||||
// starts with a tar file header.
|
||||
func IsArchivePath(path string) bool {
|
||||
@@ -369,9 +345,8 @@ type tarAppender struct {
|
||||
Buffer *bufio.Writer
|
||||
|
||||
// for hardlink mapping
|
||||
SeenFiles map[uint64]string
|
||||
UIDMaps []idtools.IDMap
|
||||
GIDMaps []idtools.IDMap
|
||||
SeenFiles map[uint64]string
|
||||
IDMappings *idtools.IDMappings
|
||||
|
||||
// For packing and unpacking whiteout files in the
|
||||
// non standard format. The whiteout files defined
|
||||
@@ -380,6 +355,15 @@ type tarAppender struct {
|
||||
WhiteoutConverter tarWhiteoutConverter
|
||||
}
|
||||
|
||||
func newTarAppender(idMapping *idtools.IDMappings, writer io.Writer) *tarAppender {
|
||||
return &tarAppender{
|
||||
SeenFiles: make(map[uint64]string),
|
||||
TarWriter: tar.NewWriter(writer),
|
||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
||||
IDMappings: idMapping,
|
||||
}
|
||||
}
|
||||
|
||||
// canonicalTarName provides a platform-independent and consistent posix-style
|
||||
//path for files and directories to be archived regardless of the platform.
|
||||
func canonicalTarName(name string, isDir bool) (string, error) {
|
||||
@@ -428,21 +412,15 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
||||
//handle re-mapping container ID mappings back to host ID mappings before
|
||||
//writing tar headers/files. We skip whiteout files because they were written
|
||||
//by the kernel and already have proper ownership relative to the host
|
||||
if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) {
|
||||
uid, gid, err := getFileUIDGID(fi.Sys())
|
||||
if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && !ta.IDMappings.Empty() {
|
||||
fileIDPair, err := getFileUIDGID(fi.Sys())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
xUID, err := idtools.ToContainer(uid, ta.UIDMaps)
|
||||
hdr.Uid, hdr.Gid, err = ta.IDMappings.ToContainer(fileIDPair)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
xGID, err := idtools.ToContainer(gid, ta.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Uid = xUID
|
||||
hdr.Gid = xGID
|
||||
}
|
||||
|
||||
if ta.WhiteoutConverter != nil {
|
||||
@@ -496,7 +474,7 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions, inUserns bool) error {
|
||||
func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *idtools.IDPair, inUserns bool) error {
|
||||
// hdr.Mode is in linux format, which we can use for sycalls,
|
||||
// but for os.Foo() calls we need the mode converted to os.FileMode,
|
||||
// so use hdrInfo.Mode() (they differ for e.g. setuid bits)
|
||||
@@ -576,7 +554,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||
// Lchown is not supported on Windows.
|
||||
if Lchown && runtime.GOOS != "windows" {
|
||||
if chownOpts == nil {
|
||||
chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid}
|
||||
chownOpts = &idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid}
|
||||
}
|
||||
if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil {
|
||||
return err
|
||||
@@ -664,14 +642,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
go func() {
|
||||
ta := &tarAppender{
|
||||
TarWriter: tar.NewWriter(compressWriter),
|
||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
||||
SeenFiles: make(map[uint64]string),
|
||||
UIDMaps: options.UIDMaps,
|
||||
GIDMaps: options.GIDMaps,
|
||||
WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat),
|
||||
}
|
||||
ta := newTarAppender(
|
||||
idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps),
|
||||
compressWriter,
|
||||
)
|
||||
ta.WhiteoutConverter = getWhiteoutConverter(options.WhiteoutFormat)
|
||||
|
||||
defer func() {
|
||||
// Make sure to check the error on Close.
|
||||
@@ -827,10 +802,8 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
|
||||
defer pools.BufioReader32KPool.Put(trBuf)
|
||||
|
||||
var dirs []*tar.Header
|
||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
|
||||
rootIDs := idMappings.RootPair()
|
||||
whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat)
|
||||
|
||||
// Iterate through the files in the archive.
|
||||
@@ -864,7 +837,7 @@ loop:
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID)
|
||||
err = idtools.MkdirAllAndChownNew(parentPath, 0777, rootIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -909,26 +882,8 @@ loop:
|
||||
}
|
||||
trBuf.Reset(tr)
|
||||
|
||||
// if the options contain a uid & gid maps, convert header uid/gid
|
||||
// entries using the maps such that lchown sets the proper mapped
|
||||
// uid/gid after writing the file. We only perform this mapping if
|
||||
// the file isn't already owned by the remapped root UID or GID, as
|
||||
// that specific uid/gid has no mapping from container -> host, and
|
||||
// those files already have the proper ownership for inside the
|
||||
// container.
|
||||
if hdr.Uid != remappedRootUID {
|
||||
xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Uid = xUID
|
||||
}
|
||||
if hdr.Gid != remappedRootGID {
|
||||
xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Gid = xGID
|
||||
if err := remapIDs(idMappings, hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if whiteoutConverter != nil {
|
||||
@@ -1013,23 +968,13 @@ func (archiver *Archiver) TarUntar(src, dst string) error {
|
||||
return err
|
||||
}
|
||||
defer archive.Close()
|
||||
|
||||
var options *TarOptions
|
||||
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
|
||||
options = &TarOptions{
|
||||
UIDMaps: archiver.UIDMaps,
|
||||
GIDMaps: archiver.GIDMaps,
|
||||
}
|
||||
options := &TarOptions{
|
||||
UIDMaps: archiver.IDMappings.UIDs(),
|
||||
GIDMaps: archiver.IDMappings.GIDs(),
|
||||
}
|
||||
return archiver.Untar(archive, dst, options)
|
||||
}
|
||||
|
||||
// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other.
|
||||
// If either Tar or Untar fails, TarUntar aborts and returns the error.
|
||||
func TarUntar(src, dst string) error {
|
||||
return defaultArchiver.TarUntar(src, dst)
|
||||
}
|
||||
|
||||
// UntarPath untar a file from path to a destination, src is the source tar file path.
|
||||
func (archiver *Archiver) UntarPath(src, dst string) error {
|
||||
archive, err := os.Open(src)
|
||||
@@ -1037,22 +982,13 @@ func (archiver *Archiver) UntarPath(src, dst string) error {
|
||||
return err
|
||||
}
|
||||
defer archive.Close()
|
||||
var options *TarOptions
|
||||
if archiver.UIDMaps != nil || archiver.GIDMaps != nil {
|
||||
options = &TarOptions{
|
||||
UIDMaps: archiver.UIDMaps,
|
||||
GIDMaps: archiver.GIDMaps,
|
||||
}
|
||||
options := &TarOptions{
|
||||
UIDMaps: archiver.IDMappings.UIDs(),
|
||||
GIDMaps: archiver.IDMappings.GIDs(),
|
||||
}
|
||||
return archiver.Untar(archive, dst, options)
|
||||
}
|
||||
|
||||
// UntarPath is a convenience function which looks for an archive
|
||||
// at filesystem path `src`, and unpacks it at `dst`.
|
||||
func UntarPath(src, dst string) error {
|
||||
return defaultArchiver.UntarPath(src, dst)
|
||||
}
|
||||
|
||||
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
||||
// unpacks it at filesystem path `dst`.
|
||||
// The archive is streamed directly with fixed buffering and no
|
||||
@@ -1069,27 +1005,16 @@ func (archiver *Archiver) CopyWithTar(src, dst string) error {
|
||||
// if this archiver is set up with ID mapping we need to create
|
||||
// the new destination directory with the remapped root UID/GID pair
|
||||
// as owner
|
||||
rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rootIDs := archiver.IDMappings.RootPair()
|
||||
// Create dst, copy src's content into it
|
||||
logrus.Debugf("Creating dest directory: %s", dst)
|
||||
if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil {
|
||||
if err := idtools.MkdirAllAndChownNew(dst, 0755, rootIDs); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Debugf("Calling TarUntar(%s, %s)", src, dst)
|
||||
return archiver.TarUntar(src, dst)
|
||||
}
|
||||
|
||||
// CopyWithTar creates a tar archive of filesystem path `src`, and
|
||||
// unpacks it at filesystem path `dst`.
|
||||
// The archive is streamed directly with fixed buffering and no
|
||||
// intermediary disk IO.
|
||||
func CopyWithTar(src, dst string) error {
|
||||
return defaultArchiver.CopyWithTar(src, dst)
|
||||
}
|
||||
|
||||
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
||||
// for a single file. It copies a regular file from path `src` to
|
||||
// path `dst`, and preserves all its metadata.
|
||||
@@ -1131,28 +1056,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
||||
hdr.Name = filepath.Base(dst)
|
||||
hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode)))
|
||||
|
||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps)
|
||||
if err != nil {
|
||||
if err := remapIDs(archiver.IDMappings, hdr); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// only perform mapping if the file being copied isn't already owned by the
|
||||
// uid or gid of the remapped root in the container
|
||||
if remappedRootUID != hdr.Uid {
|
||||
xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Uid = xUID
|
||||
}
|
||||
if remappedRootGID != hdr.Gid {
|
||||
xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
hdr.Gid = xGID
|
||||
}
|
||||
|
||||
tw := tar.NewWriter(w)
|
||||
defer tw.Close()
|
||||
if err := tw.WriteHeader(hdr); err != nil {
|
||||
@@ -1176,16 +1083,10 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
// CopyFileWithTar emulates the behavior of the 'cp' command-line
|
||||
// for a single file. It copies a regular file from path `src` to
|
||||
// path `dst`, and preserves all its metadata.
|
||||
//
|
||||
// Destination handling is in an operating specific manner depending
|
||||
// where the daemon is running. If `dst` ends with a trailing slash
|
||||
// the final destination path will be `dst/base(src)` (Linux) or
|
||||
// `dst\base(src)` (Windows).
|
||||
func CopyFileWithTar(src, dst string) (err error) {
|
||||
return defaultArchiver.CopyFileWithTar(src, dst)
|
||||
func remapIDs(idMappings *idtools.IDMappings, hdr *tar.Header) error {
|
||||
ids, err := idMappings.ToHost(idtools.IDPair{UID: hdr.Uid, GID: hdr.Gid})
|
||||
hdr.Uid, hdr.Gid = ids.UID, ids.GID
|
||||
return err
|
||||
}
|
||||
|
||||
// cmdStream executes a command, and returns its stdout as a stream.
|
||||
|
||||
7
vendor/github.com/docker/docker/pkg/archive/archive_unix.go
generated
vendored
7
vendor/github.com/docker/docker/pkg/archive/archive_unix.go
generated
vendored
@@ -9,6 +9,7 @@ import (
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
rsystem "github.com/opencontainers/runc/libcontainer/system"
|
||||
)
|
||||
@@ -72,13 +73,13 @@ func getInodeFromStat(stat interface{}) (inode uint64, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func getFileUIDGID(stat interface{}) (int, int, error) {
|
||||
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
|
||||
s, ok := stat.(*syscall.Stat_t)
|
||||
|
||||
if !ok {
|
||||
return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t")
|
||||
return idtools.IDPair{}, errors.New("cannot convert stat value to syscall.Stat_t")
|
||||
}
|
||||
return int(s.Uid), int(s.Gid), nil
|
||||
return idtools.IDPair{UID: int(s.Uid), GID: int(s.Gid)}, nil
|
||||
}
|
||||
|
||||
func major(device uint64) uint64 {
|
||||
|
||||
5
vendor/github.com/docker/docker/pkg/archive/archive_windows.go
generated
vendored
5
vendor/github.com/docker/docker/pkg/archive/archive_windows.go
generated
vendored
@@ -9,6 +9,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/longpath"
|
||||
)
|
||||
|
||||
@@ -72,7 +73,7 @@ func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func getFileUIDGID(stat interface{}) (int, int, error) {
|
||||
func getFileUIDGID(stat interface{}) (idtools.IDPair, error) {
|
||||
// no notion of file ownership mapping yet on Windows
|
||||
return 0, 0, nil
|
||||
return idtools.IDPair{0, 0}, nil
|
||||
}
|
||||
|
||||
9
vendor/github.com/docker/docker/pkg/archive/changes.go
generated
vendored
9
vendor/github.com/docker/docker/pkg/archive/changes.go
generated
vendored
@@ -394,13 +394,8 @@ func ChangesSize(newDir string, changes []Change) int64 {
|
||||
func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (io.ReadCloser, error) {
|
||||
reader, writer := io.Pipe()
|
||||
go func() {
|
||||
ta := &tarAppender{
|
||||
TarWriter: tar.NewWriter(writer),
|
||||
Buffer: pools.BufioWriter32KPool.Get(nil),
|
||||
SeenFiles: make(map[uint64]string),
|
||||
UIDMaps: uidMaps,
|
||||
GIDMaps: gidMaps,
|
||||
}
|
||||
ta := newTarAppender(idtools.NewIDMappingsFromMaps(uidMaps, gidMaps), writer)
|
||||
|
||||
// this buffer is needed for the duration of this piped stream
|
||||
defer pools.BufioWriter32KPool.Put(ta.Buffer)
|
||||
|
||||
|
||||
28
vendor/github.com/docker/docker/pkg/archive/diff.go
generated
vendored
28
vendor/github.com/docker/docker/pkg/archive/diff.go
generated
vendored
@@ -33,10 +33,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
|
||||
if options.ExcludePatterns == nil {
|
||||
options.ExcludePatterns = []string{}
|
||||
}
|
||||
remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
idMappings := idtools.NewIDMappingsFromMaps(options.UIDMaps, options.GIDMaps)
|
||||
|
||||
aufsTempdir := ""
|
||||
aufsHardlinks := make(map[string]*tar.Header)
|
||||
@@ -195,27 +192,10 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
|
||||
srcData = tmpFile
|
||||
}
|
||||
|
||||
// if the options contain a uid & gid maps, convert header uid/gid
|
||||
// entries using the maps such that lchown sets the proper mapped
|
||||
// uid/gid after writing the file. We only perform this mapping if
|
||||
// the file isn't already owned by the remapped root UID or GID, as
|
||||
// that specific uid/gid has no mapping from container -> host, and
|
||||
// those files already have the proper ownership for inside the
|
||||
// container.
|
||||
if srcHdr.Uid != remappedRootUID {
|
||||
xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
srcHdr.Uid = xUID
|
||||
}
|
||||
if srcHdr.Gid != remappedRootGID {
|
||||
xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
srcHdr.Gid = xGID
|
||||
if err := remapIDs(idMappings, srcHdr); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if err := createTarFile(path, dest, srcHdr, srcData, true, nil, options.InUserNS); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
146
vendor/github.com/docker/docker/pkg/idtools/idtools.go
generated
vendored
146
vendor/github.com/docker/docker/pkg/idtools/idtools.go
generated
vendored
@@ -37,49 +37,56 @@ const (
|
||||
// MkdirAllAs creates a directory (include any along the path) and then modifies
|
||||
// ownership to the requested uid/gid. If the directory already exists, this
|
||||
// function will still change ownership to the requested uid/gid pair.
|
||||
// Deprecated: Use MkdirAllAndChown
|
||||
func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||
return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
|
||||
}
|
||||
|
||||
// MkdirAllNewAs creates a directory (include any along the path) and then modifies
|
||||
// ownership ONLY of newly created directories to the requested uid/gid. If the
|
||||
// directories along the path exist, no change of ownership will be performed
|
||||
func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||
return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
|
||||
}
|
||||
|
||||
// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
|
||||
// If the directory already exists, this function still changes ownership
|
||||
// Deprecated: Use MkdirAndChown with a IDPair
|
||||
func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
|
||||
return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
|
||||
}
|
||||
|
||||
// MkdirAllAndChown creates a directory (include any along the path) and then modifies
|
||||
// ownership to the requested uid/gid. If the directory already exists, this
|
||||
// function will still change ownership to the requested uid/gid pair.
|
||||
func MkdirAllAndChown(path string, mode os.FileMode, ids IDPair) error {
|
||||
return mkdirAs(path, mode, ids.UID, ids.GID, true, true)
|
||||
}
|
||||
|
||||
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
|
||||
// If the directory already exists, this function still changes ownership
|
||||
func MkdirAndChown(path string, mode os.FileMode, ids IDPair) error {
|
||||
return mkdirAs(path, mode, ids.UID, ids.GID, false, true)
|
||||
}
|
||||
|
||||
// MkdirAllAndChownNew creates a directory (include any along the path) and then modifies
|
||||
// ownership ONLY of newly created directories to the requested uid/gid. If the
|
||||
// directories along the path exist, no change of ownership will be performed
|
||||
func MkdirAllAndChownNew(path string, mode os.FileMode, ids IDPair) error {
|
||||
return mkdirAs(path, mode, ids.UID, ids.GID, true, false)
|
||||
}
|
||||
|
||||
// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
|
||||
// If the maps are empty, then the root uid/gid will default to "real" 0/0
|
||||
func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
|
||||
var uid, gid int
|
||||
|
||||
if uidMap != nil {
|
||||
xUID, err := ToHost(0, uidMap)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
uid = xUID
|
||||
uid, err := toHost(0, uidMap)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
if gidMap != nil {
|
||||
xGID, err := ToHost(0, gidMap)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
gid = xGID
|
||||
gid, err := toHost(0, gidMap)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
return uid, gid, nil
|
||||
}
|
||||
|
||||
// ToContainer takes an id mapping, and uses it to translate a
|
||||
// toContainer takes an id mapping, and uses it to translate a
|
||||
// host ID to the remapped ID. If no map is provided, then the translation
|
||||
// assumes a 1-to-1 mapping and returns the passed in id
|
||||
func ToContainer(hostID int, idMap []IDMap) (int, error) {
|
||||
func toContainer(hostID int, idMap []IDMap) (int, error) {
|
||||
if idMap == nil {
|
||||
return hostID, nil
|
||||
}
|
||||
@@ -92,10 +99,10 @@ func ToContainer(hostID int, idMap []IDMap) (int, error) {
|
||||
return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
|
||||
}
|
||||
|
||||
// ToHost takes an id mapping and a remapped ID, and translates the
|
||||
// toHost takes an id mapping and a remapped ID, and translates the
|
||||
// ID to the mapped host ID. If no map is provided, then the translation
|
||||
// assumes a 1-to-1 mapping and returns the passed in id #
|
||||
func ToHost(contID int, idMap []IDMap) (int, error) {
|
||||
func toHost(contID int, idMap []IDMap) (int, error) {
|
||||
if idMap == nil {
|
||||
return contID, nil
|
||||
}
|
||||
@@ -108,26 +115,101 @@ func ToHost(contID int, idMap []IDMap) (int, error) {
|
||||
return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
|
||||
}
|
||||
|
||||
// CreateIDMappings takes a requested user and group name and
|
||||
// IDPair is a UID and GID pair
|
||||
type IDPair struct {
|
||||
UID int
|
||||
GID int
|
||||
}
|
||||
|
||||
// IDMappings contains a mappings of UIDs and GIDs
|
||||
type IDMappings struct {
|
||||
uids []IDMap
|
||||
gids []IDMap
|
||||
}
|
||||
|
||||
// NewIDMappings takes a requested user and group name and
|
||||
// using the data from /etc/sub{uid,gid} ranges, creates the
|
||||
// proper uid and gid remapping ranges for that user/group pair
|
||||
func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
|
||||
func NewIDMappings(username, groupname string) (*IDMappings, error) {
|
||||
subuidRanges, err := parseSubuid(username)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
subgidRanges, err := parseSubgid(groupname)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
if len(subuidRanges) == 0 {
|
||||
return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
|
||||
return nil, fmt.Errorf("No subuid ranges found for user %q", username)
|
||||
}
|
||||
if len(subgidRanges) == 0 {
|
||||
return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
|
||||
return nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
|
||||
}
|
||||
|
||||
return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
|
||||
return &IDMappings{
|
||||
uids: createIDMap(subuidRanges),
|
||||
gids: createIDMap(subgidRanges),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// NewIDMappingsFromMaps creates a new mapping from two slices
|
||||
// Deprecated: this is a temporary shim while transitioning to IDMapping
|
||||
func NewIDMappingsFromMaps(uids []IDMap, gids []IDMap) *IDMappings {
|
||||
return &IDMappings{uids: uids, gids: gids}
|
||||
}
|
||||
|
||||
// RootPair returns a uid and gid pair for the root user. The error is ignored
|
||||
// because a root user always exists, and the defaults are correct when the uid
|
||||
// and gid maps are empty.
|
||||
func (i *IDMappings) RootPair() IDPair {
|
||||
uid, gid, _ := GetRootUIDGID(i.uids, i.gids)
|
||||
return IDPair{UID: uid, GID: gid}
|
||||
}
|
||||
|
||||
// ToHost returns the host UID and GID for the container uid, gid.
|
||||
// Remapping is only performed if the ids aren't already the remapped root ids
|
||||
func (i *IDMappings) ToHost(pair IDPair) (IDPair, error) {
|
||||
var err error
|
||||
target := i.RootPair()
|
||||
|
||||
if pair.UID != target.UID {
|
||||
target.UID, err = toHost(pair.UID, i.uids)
|
||||
if err != nil {
|
||||
return target, err
|
||||
}
|
||||
}
|
||||
|
||||
if pair.GID != target.GID {
|
||||
target.GID, err = toHost(pair.GID, i.gids)
|
||||
}
|
||||
return target, err
|
||||
}
|
||||
|
||||
// ToContainer returns the container UID and GID for the host uid and gid
|
||||
func (i *IDMappings) ToContainer(pair IDPair) (int, int, error) {
|
||||
uid, err := toContainer(pair.UID, i.uids)
|
||||
if err != nil {
|
||||
return -1, -1, err
|
||||
}
|
||||
gid, err := toContainer(pair.GID, i.gids)
|
||||
return uid, gid, err
|
||||
}
|
||||
|
||||
// Empty returns true if there are no id mappings
|
||||
func (i *IDMappings) Empty() bool {
|
||||
return len(i.uids) == 0 && len(i.gids) == 0
|
||||
}
|
||||
|
||||
// UIDs return the UID mapping
|
||||
// TODO: remove this once everything has been refactored to use pairs
|
||||
func (i *IDMappings) UIDs() []IDMap {
|
||||
return i.uids
|
||||
}
|
||||
|
||||
// GIDs return the UID mapping
|
||||
// TODO: remove this once everything has been refactored to use pairs
|
||||
func (i *IDMappings) GIDs() []IDMap {
|
||||
return i.gids
|
||||
}
|
||||
|
||||
func createIDMap(subidRanges ranges) []IDMap {
|
||||
|
||||
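As a rough illustration of the new IDPair/IDMappings API introduced above, here is a sketch of remapping a container uid/gid to host ids. The mapping values (container 0 mapped to host 100000 with a range of 65536) are assumptions chosen for the example, not anything taken from the diff.

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/idtools"
)

func main() {
	// Illustrative mapping: container ids 0..65535 -> host ids starting at 100000.
	uids := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}
	gids := []idtools.IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}}

	mappings := idtools.NewIDMappingsFromMaps(uids, gids)
	fmt.Println(mappings.RootPair()) // {100000 100000}

	// Container uid/gid 1000 lands on host 101000 under this mapping.
	host, err := mappings.ToHost(idtools.IDPair{UID: 1000, GID: 1000})
	if err != nil {
		panic(err)
	}
	fmt.Println(host.UID, host.GID) // 101000 101000
}
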
6
vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
generated
vendored
6
vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go
generated
vendored
@@ -69,15 +69,15 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
|
||||
|
||||
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
|
||||
// if that uid, gid pair has access (execute bit) to the directory
|
||||
func CanAccess(path string, uid, gid int) bool {
|
||||
func CanAccess(path string, pair IDPair) bool {
|
||||
statInfo, err := system.Stat(path)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
fileMode := os.FileMode(statInfo.Mode())
|
||||
permBits := fileMode.Perm()
|
||||
return accessible(statInfo.UID() == uint32(uid),
|
||||
statInfo.GID() == uint32(gid), permBits)
|
||||
return accessible(statInfo.UID() == uint32(pair.UID),
|
||||
statInfo.GID() == uint32(pair.GID), permBits)
|
||||
}
|
||||
|
||||
func accessible(isOwner, isGroup bool, perms os.FileMode) bool {
|
||||
|
||||
2
vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
generated
vendored
2
vendor/github.com/docker/docker/pkg/idtools/idtools_windows.go
generated
vendored
@@ -20,6 +20,6 @@ func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chown
|
||||
// CanAccess takes a valid (existing) directory and a uid, gid pair and determines
|
||||
// if that uid, gid pair has access (execute bit) to the directory
|
||||
// Windows does not require/support this function, so always return true
|
||||
func CanAccess(path string, uid, gid int) bool {
|
||||
func CanAccess(path string, pair IDPair) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
31
vendor/github.com/docker/docker/pkg/pools/pools.go
generated
vendored
31
vendor/github.com/docker/docker/pkg/pools/pools.go
generated
vendored
@@ -17,15 +17,16 @@ import (
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
)
|
||||
|
||||
const buffer32K = 32 * 1024
|
||||
|
||||
var (
|
||||
// BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer.
|
||||
BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K)
|
||||
// BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer.
|
||||
BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K)
|
||||
buffer32KPool = newBufferPoolWithSize(buffer32K)
|
||||
)
|
||||
|
||||
const buffer32K = 32 * 1024
|
||||
|
||||
// BufioReaderPool is a bufio reader that uses sync.Pool.
|
||||
type BufioReaderPool struct {
|
||||
pool sync.Pool
|
||||
@@ -54,11 +55,31 @@ func (bufPool *BufioReaderPool) Put(b *bufio.Reader) {
|
||||
bufPool.pool.Put(b)
|
||||
}
|
||||
|
||||
type bufferPool struct {
|
||||
pool sync.Pool
|
||||
}
|
||||
|
||||
func newBufferPoolWithSize(size int) *bufferPool {
|
||||
return &bufferPool{
|
||||
pool: sync.Pool{
|
||||
New: func() interface{} { return make([]byte, size) },
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func (bp *bufferPool) Get() []byte {
|
||||
return bp.pool.Get().([]byte)
|
||||
}
|
||||
|
||||
func (bp *bufferPool) Put(b []byte) {
|
||||
bp.pool.Put(b)
|
||||
}
|
||||
|
||||
// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy.
|
||||
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
|
||||
buf := BufioReader32KPool.Get(src)
|
||||
written, err = io.Copy(dst, buf)
|
||||
BufioReader32KPool.Put(buf)
|
||||
buf := buffer32KPool.Get()
|
||||
written, err = io.CopyBuffer(dst, src, buf)
|
||||
buffer32KPool.Put(buf)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
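The Copy change above swaps a pooled bufio.Reader for a pooled raw []byte plus io.CopyBuffer, so copies reuse one 32K buffer instead of allocating per call. A minimal self-contained sketch of that pattern, independent of the vendored package:

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
	"sync"
)

const buffer32K = 32 * 1024

// A sync.Pool of 32K byte slices, reused across copies to avoid the
// per-call buffer allocation io.Copy would otherwise make.
var buffer32KPool = sync.Pool{
	New: func() interface{} { return make([]byte, buffer32K) },
}

func pooledCopy(dst io.Writer, src io.Reader) (int64, error) {
	buf := buffer32KPool.Get().([]byte)
	defer buffer32KPool.Put(buf)
	return io.CopyBuffer(dst, src, buf)
}

func main() {
	var out bytes.Buffer
	n, err := pooledCopy(&out, strings.NewReader("hello"))
	fmt.Println(n, err, out.String()) // 5 <nil> hello
}
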
21
vendor/github.com/docker/docker/pkg/system/syscall_windows.go
generated
vendored
21
vendor/github.com/docker/docker/pkg/system/syscall_windows.go
generated
vendored
@@ -8,8 +8,9 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
|
||||
procGetVersionExW = modkernel32.NewProc("GetVersionExW")
|
||||
ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
|
||||
procGetVersionExW = modkernel32.NewProc("GetVersionExW")
|
||||
procGetProductInfo = modkernel32.NewProc("GetProductInfo")
|
||||
)
|
||||
|
||||
// OSVersion is a wrapper for Windows version information
|
||||
@@ -66,6 +67,22 @@ func IsWindowsClient() bool {
|
||||
return osviex.ProductType == verNTWorkstation
|
||||
}
|
||||
|
||||
// IsIoTCore returns true if the currently running image is based off of
|
||||
// Windows 10 IoT Core.
|
||||
// @engine maintainers - this function should not be removed or modified as it
|
||||
// is used to enforce licensing restrictions on Windows.
|
||||
func IsIoTCore() bool {
|
||||
var returnedProductType uint32
|
||||
r1, _, err := procGetProductInfo.Call(6, 1, 0, 0, uintptr(unsafe.Pointer(&returnedProductType)))
|
||||
if r1 == 0 {
|
||||
logrus.Warnf("GetProductInfo failed - assuming this is not IoT: %v", err)
|
||||
return false
|
||||
}
|
||||
const productIoTUAP = 0x0000007B
|
||||
const productIoTUAPCommercial = 0x00000083
|
||||
return returnedProductType == productIoTUAP || returnedProductType == productIoTUAPCommercial
|
||||
}
|
||||
|
||||
// Unmount is a platform-specific helper function to call
|
||||
// the unmount syscall. Not supported on Windows
|
||||
func Unmount(dest string) error {
|
||||
|
||||
2
vendor/github.com/docker/docker/pkg/term/termios_bsd.go
generated
vendored
2
vendor/github.com/docker/docker/pkg/term/termios_bsd.go
generated
vendored
@@ -27,7 +27,7 @@ func MakeRaw(fd uintptr) (*State, error) {
|
||||
|
||||
newState := oldState.termios
|
||||
newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
|
||||
newState.Oflag |= unix.OPOST
|
||||
newState.Oflag &^= unix.OPOST
|
||||
newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
|
||||
newState.Cflag &^= (unix.CSIZE | unix.PARENB)
|
||||
newState.Cflag |= unix.CS8
|
||||
|
||||
2
vendor/github.com/docker/docker/pkg/term/termios_linux.go
generated
vendored
2
vendor/github.com/docker/docker/pkg/term/termios_linux.go
generated
vendored
@@ -26,7 +26,7 @@ func MakeRaw(fd uintptr) (*State, error) {
|
||||
newState := oldState.termios
|
||||
|
||||
newState.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON)
|
||||
newState.Oflag |= unix.OPOST
|
||||
newState.Oflag &^= unix.OPOST
|
||||
newState.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN)
|
||||
newState.Cflag &^= (unix.CSIZE | unix.PARENB)
|
||||
newState.Cflag |= unix.CS8
|
||||
|
||||
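The flip from Oflag |= unix.OPOST to Oflag &^= unix.OPOST matters because raw mode is expected to disable output post-processing, matching cfmakeraw(3). A condensed sketch of the full flag set, assuming Linux and golang.org/x/sys/unix (not the vendored term package itself):

//go:build linux

package rawmode

import "golang.org/x/sys/unix"

// MakeRaw applies the cfmakeraw(3) flag set to fd, including clearing OPOST
// so output is not post-processed, and returns the previous state so the
// caller can restore it with IoctlSetTermios(fd, unix.TCSETS, old).
func MakeRaw(fd int) (*unix.Termios, error) {
	old, err := unix.IoctlGetTermios(fd, unix.TCGETS)
	if err != nil {
		return nil, err
	}
	state := *old
	state.Iflag &^= unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP |
		unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON
	state.Oflag &^= unix.OPOST
	state.Lflag &^= unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN
	state.Cflag &^= unix.CSIZE | unix.PARENB
	state.Cflag |= unix.CS8
	state.Cc[unix.VMIN] = 1
	state.Cc[unix.VTIME] = 0
	if err := unix.IoctlSetTermios(fd, unix.TCSETS, &state); err != nil {
		return nil, err
	}
	return old, nil
}
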
1
vendor/github.com/fsouza/go-dockerclient/AUTHORS
generated
vendored
1
vendor/github.com/fsouza/go-dockerclient/AUTHORS
generated
vendored
@@ -49,6 +49,7 @@ Darren Shepherd
|
||||
Dave Choi
|
||||
David Huie
|
||||
Dawn Chen
|
||||
Derek Petersen
|
||||
Dinesh Subhraveti
|
||||
Drew Wells
|
||||
Ed
|
||||
|
||||
4
vendor/github.com/fsouza/go-dockerclient/container.go
generated
vendored
4
vendor/github.com/fsouza/go-dockerclient/container.go
generated
vendored
@@ -736,6 +736,10 @@ type HostConfig struct {
|
||||
AutoRemove bool `json:"AutoRemove,omitempty" yaml:"AutoRemove,omitempty" toml:"AutoRemove,omitempty"`
|
||||
StorageOpt map[string]string `json:"StorageOpt,omitempty" yaml:"StorageOpt,omitempty" toml:"StorageOpt,omitempty"`
|
||||
Sysctls map[string]string `json:"Sysctls,omitempty" yaml:"Sysctls,omitempty" toml:"Sysctls,omitempty"`
|
||||
CPUCount int64 `json:"CpuCount,omitempty" yaml:"CpuCount,omitempty"`
|
||||
CPUPercent int64 `json:"CpuPercent,omitempty" yaml:"CpuPercent,omitempty"`
|
||||
IOMaximumBandwidth int64 `json:"IOMaximumBandwidth,omitempty" yaml:"IOMaximumBandwidth,omitempty"`
|
||||
IOMaximumIOps int64 `json:"IOMaximumIOps,omitempty" yaml:"IOMaximumIOps,omitempty"`
|
||||
}
|
||||
|
||||
// NetworkingConfig represents the container's networking configuration for each of its interfaces
|
||||
|
||||
44
vendor/github.com/fullsailor/pkcs7/ber.go
generated
vendored
44
vendor/github.com/fullsailor/pkcs7/ber.go
generated
vendored
@@ -161,7 +161,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
|
||||
var length int
|
||||
l := ber[offset]
|
||||
offset++
|
||||
hack := 0
|
||||
indefinite := false
|
||||
if l > 0x80 {
|
||||
numberOfBytes := (int)(l & 0x7F)
|
||||
if numberOfBytes > 4 { // int is only guaranteed to be 32bit
|
||||
@@ -180,14 +180,7 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
|
||||
offset++
|
||||
}
|
||||
} else if l == 0x80 {
|
||||
// find length by searching content
|
||||
markerIndex := bytes.LastIndex(ber[offset:], []byte{0x0, 0x0})
|
||||
if markerIndex == -1 {
|
||||
return nil, 0, errors.New("ber2der: Invalid BER format")
|
||||
}
|
||||
length = markerIndex
|
||||
hack = 2
|
||||
//fmt.Printf("--> (compute length) marker found at offset: %d\n", markerIndex+offset)
|
||||
indefinite = true
|
||||
} else {
|
||||
length = (int)(l)
|
||||
}
|
||||
@@ -201,6 +194,9 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
|
||||
//fmt.Printf("--> content end : %d\n", contentEnd)
|
||||
//fmt.Printf("--> content : % X\n", ber[offset:contentEnd])
|
||||
var obj asn1Object
|
||||
if indefinite && kind == 0 {
|
||||
return nil, 0, errors.New("ber2der: Indefinite form tag must have constructed encoding")
|
||||
}
|
||||
if kind == 0 {
|
||||
obj = asn1Primitive{
|
||||
tagBytes: ber[tagStart:tagEnd],
|
||||
@@ -209,14 +205,25 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
|
||||
}
|
||||
} else {
|
||||
var subObjects []asn1Object
|
||||
for offset < contentEnd {
|
||||
for (offset < contentEnd) || indefinite {
|
||||
var subObj asn1Object
|
||||
var err error
|
||||
subObj, offset, err = readObject(ber[:contentEnd], offset)
|
||||
subObj, offset, err = readObject(ber, offset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
subObjects = append(subObjects, subObj)
|
||||
|
||||
if indefinite {
|
||||
terminated, err := isIndefiniteTermination(ber, offset)
|
||||
if err != nil {
|
||||
return nil, 0, err
|
||||
}
|
||||
|
||||
if terminated {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
obj = asn1Structured{
|
||||
tagBytes: ber[tagStart:tagEnd],
|
||||
@@ -224,5 +231,18 @@ func readObject(ber []byte, offset int) (asn1Object, int, error) {
|
||||
}
|
||||
}
|
||||
|
||||
return obj, contentEnd + hack, nil
|
||||
// Apply indefinite form length with 0x0000 terminator.
|
||||
if indefinite {
|
||||
contentEnd = offset + 2
|
||||
}
|
||||
|
||||
return obj, contentEnd, nil
|
||||
}
|
||||
|
||||
func isIndefiniteTermination(ber []byte, offset int) (bool, error) {
|
||||
if len(ber) - offset < 2 {
|
||||
return false, errors.New("ber2der: Invalid BER format")
|
||||
}
|
||||
|
||||
return bytes.Index(ber[offset:], []byte{0x0, 0x0}) == 0, nil
|
||||
}
|
||||
|
||||
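To make the indefinite-length handling above concrete: a BER length octet of 0x80 means "indefinite", and the content then runs until a 0x00 0x00 end-of-contents marker, which is what isIndefiniteTermination looks for. A tiny standalone check with made-up sample bytes:

package main

import (
	"bytes"
	"fmt"
)

// terminated reports whether the two bytes at offset are the end-of-contents
// marker used by indefinite-length BER encodings.
func terminated(ber []byte, offset int) bool {
	return len(ber)-offset >= 2 && bytes.Equal(ber[offset:offset+2], []byte{0x00, 0x00})
}

func main() {
	// SEQUENCE (0x30) with indefinite length (0x80) containing one
	// OCTET STRING "hi", closed by the 0x00 0x00 terminator.
	ber := []byte{0x30, 0x80, 0x04, 0x02, 'h', 'i', 0x00, 0x00}

	offset := 2                           // skip the tag and the indefinite-length octet
	offset += 2 + int(ber[offset+1])      // skip the inner primitive (tag, length, content)
	fmt.Println(terminated(ber, offset))  // true
}
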
6
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
6
vendor/github.com/go-sql-driver/mysql/AUTHORS
generated
vendored
@@ -12,8 +12,10 @@
|
||||
# Individual Persons
|
||||
|
||||
Aaron Hopkins <go-sql-driver at die.net>
|
||||
Achille Roussel <achille.roussel at gmail.com>
|
||||
Arne Hormann <arnehormann at gmail.com>
|
||||
Asta Xie <xiemengjun at gmail.com>
|
||||
Bulat Gaifullin <gaifullinbf at gmail.com>
|
||||
Carlos Nieto <jose.carlos at menteslibres.net>
|
||||
Chris Moos <chris at tech9computers.com>
|
||||
Daniel Nichter <nil at codenode.com>
|
||||
@@ -21,11 +23,13 @@ Daniël van Eeden <git at myname.nl>
|
||||
Dave Protasowski <dprotaso at gmail.com>
|
||||
DisposaBoy <disposaboy at dby.me>
|
||||
Egor Smolyakov <egorsmkv at gmail.com>
|
||||
Evan Shaw <evan at vendhq.com>
|
||||
Frederick Mayle <frederickmayle at gmail.com>
|
||||
Gustavo Kristic <gkristic at gmail.com>
|
||||
Hanno Braun <mail at hannobraun.com>
|
||||
Henri Yandell <flamefew at gmail.com>
|
||||
Hirotaka Yamamoto <ymmt2005 at gmail.com>
|
||||
ICHINOSE Shogo <shogo82148 at gmail.com>
|
||||
INADA Naoki <songofacandy at gmail.com>
|
||||
Jacek Szwec <szwec.jacek at gmail.com>
|
||||
James Harr <james.harr at gmail.com>
|
||||
@@ -42,9 +46,11 @@ Lion Yang <lion at aosc.xyz>
|
||||
Luca Looz <luca.looz92 at gmail.com>
|
||||
Lucas Liu <extrafliu at gmail.com>
|
||||
Luke Scott <luke at webconnex.com>
|
||||
Maciej Zimnoch <maciej.zimnoch@codilime.com>
|
||||
Michael Woolnough <michael.woolnough at gmail.com>
|
||||
Nicola Peduzzi <thenikso at gmail.com>
|
||||
Olivier Mengué <dolmen at cpan.org>
|
||||
oscarzhao <oscarzhaosl at gmail.com>
|
||||
Paul Bonser <misterpib at gmail.com>
|
||||
Peter Schultz <peter.schultz at classmarkets.com>
|
||||
Rebecca Chin <rchin at pivotal.io>
|
||||
|
||||
8
vendor/github.com/go-sql-driver/mysql/README.md
generated
vendored
8
vendor/github.com/go-sql-driver/mysql/README.md
generated
vendored
@@ -19,6 +19,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
|
||||
* [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support)
|
||||
* [time.Time support](#timetime-support)
|
||||
* [Unicode support](#unicode-support)
|
||||
* [context.Context Support](#contextcontext-support)
|
||||
* [Testing / Development](#testing--development)
|
||||
* [License](#license)
|
||||
|
||||
@@ -38,7 +39,7 @@ A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) pac
|
||||
* Optional placeholder interpolation
|
||||
|
||||
## Requirements
|
||||
* Go 1.2 or higher
|
||||
* Go 1.5 or higher
|
||||
* MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+)
|
||||
|
||||
---------------------------------------
|
||||
@@ -278,7 +279,7 @@ Default: false
|
||||
|
||||
`rejectreadOnly=true` causes the driver to reject read-only connections. This
|
||||
is for a possible race condition during an automatic failover, where the mysql
|
||||
client gets connected to a read-only replica after the failover.
|
||||
client gets connected to a read-only replica after the failover.
|
||||
|
||||
Note that this should be a fairly rare case, as an automatic failover normally
|
||||
happens when the primary is down, and the race condition shouldn't happen
|
||||
@@ -443,6 +444,9 @@ Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAM
|
||||
|
||||
See http://dev.mysql.com/doc/refman/5.7/en/charset-unicode.html for more details on MySQL's Unicode support.
|
||||
|
||||
## `context.Context` Support
|
||||
Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts.
|
||||
See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details.
|
||||
|
||||
## Testing / Development
|
||||
To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details.
|
||||
|
||||
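The README addition above advertises context.Context support through database/sql on Go 1.8+. A hedged usage sketch of a query with a timeout; the DSN and query are placeholders to adjust for a real server:

package main

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// Placeholder DSN; adjust user, password, host and database.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// The driver cancels the running query if the context expires.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var now string
	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
		panic(err)
	}
	fmt.Println(now)
}
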
79
vendor/github.com/go-sql-driver/mysql/connection.go
generated
vendored
79
vendor/github.com/go-sql-driver/mysql/connection.go
generated
vendored
@@ -17,6 +17,16 @@ import (
	"time"
)

// a copy of context.Context for Go 1.7 and earlier
type mysqlContext interface {
	Done() <-chan struct{}
	Err() error

	// defined in context.Context, but not used in this driver:
	// Deadline() (deadline time.Time, ok bool)
	// Value(key interface{}) interface{}
}

type mysqlConn struct {
	buf              buffer
	netConn          net.Conn
@@ -31,6 +41,14 @@ type mysqlConn struct {
	sequence         uint8
	parseTime        bool
	strict           bool

	// for context support (Go 1.8+)
	watching bool
	watcher  chan<- mysqlContext
	closech  chan struct{}
	finished chan<- struct{}
	canceled atomicError // set non-nil if conn is canceled
	closed   atomicBool  // set when conn is closed, before closech is closed
}

// Handles parameters set in DSN after the connection is established
@@ -64,7 +82,7 @@ func (mc *mysqlConn) handleParams() (err error) {
}

func (mc *mysqlConn) Begin() (driver.Tx, error) {
	if mc.netConn == nil {
	if mc.closed.IsSet() {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}
@@ -78,7 +96,7 @@ func (mc *mysqlConn) Begin() (driver.Tx, error) {

func (mc *mysqlConn) Close() (err error) {
	// Makes Close idempotent
	if mc.netConn != nil {
	if !mc.closed.IsSet() {
		err = mc.writeCommandPacket(comQuit)
	}

@@ -92,19 +110,32 @@ func (mc *mysqlConn) Close() (err error) {
// is called before auth or on auth failure because MySQL will have already
// closed the network connection.
func (mc *mysqlConn) cleanup() {
	// Makes cleanup idempotent
	if mc.netConn != nil {
		if err := mc.netConn.Close(); err != nil {
			errLog.Print(err)
		}
		mc.netConn = nil
	if !mc.closed.TrySet(true) {
		return
	}
	mc.cfg = nil
	mc.buf.nc = nil

	// Makes cleanup idempotent
	close(mc.closech)
	if mc.netConn == nil {
		return
	}
	if err := mc.netConn.Close(); err != nil {
		errLog.Print(err)
	}
}

func (mc *mysqlConn) error() error {
	if mc.closed.IsSet() {
		if err := mc.canceled.Value(); err != nil {
			return err
		}
		return ErrInvalidConn
	}
	return nil
}

func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
	if mc.netConn == nil {
	if mc.closed.IsSet() {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}
@@ -258,7 +289,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
}

func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) {
	if mc.netConn == nil {
	if mc.closed.IsSet() {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}
@@ -315,7 +346,11 @@ func (mc *mysqlConn) exec(query string) error {
}

func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) {
	if mc.netConn == nil {
	return mc.query(query, args)
}

func (mc *mysqlConn) query(query string, args []driver.Value) (*textRows, error) {
	if mc.closed.IsSet() {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}
@@ -387,3 +422,21 @@ func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) {
	}
	return nil, err
}

// finish is called when the query has canceled.
func (mc *mysqlConn) cancel(err error) {
	mc.canceled.Set(err)
	mc.cleanup()
}

// finish is called when the query has succeeded.
func (mc *mysqlConn) finish() {
	if !mc.watching || mc.finished == nil {
		return
	}
	select {
	case mc.finished <- struct{}{}:
		mc.watching = false
	case <-mc.closech:
	}
}

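The reworked `cleanup` above relies on a close-once pattern: an atomic flag guards the teardown and a closed channel broadcasts it to any watcher goroutine. Here is a minimal standalone sketch of that pattern using only the standard library; the `conn` type and its fields are illustrative, not the driver's actual types.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// conn is a stand-in for a connection that must tear down exactly once.
type conn struct {
	closed  int32         // 0 = open, 1 = closed (accessed atomically)
	closech chan struct{} // closed to broadcast "this conn is gone"
}

func newConn() *conn {
	return &conn{closech: make(chan struct{})}
}

// cleanup is safe to call from multiple goroutines; only the first call
// closes the channel and releases resources.
func (c *conn) cleanup() {
	if !atomic.CompareAndSwapInt32(&c.closed, 0, 1) {
		return // another caller already won the race
	}
	close(c.closech) // wakes any goroutine selecting on closech
	// ... release sockets, buffers, etc.
}

func main() {
	c := newConn()
	c.cleanup()
	c.cleanup() // second call is a no-op, no double-close panic
	<-c.closech // already closed, so this returns immediately
	fmt.Println("closed once")
}
```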
204
vendor/github.com/go-sql-driver/mysql/connection_go18.go
generated
vendored
Normal file
204
vendor/github.com/go-sql-driver/mysql/connection_go18.go
generated
vendored
Normal file
@@ -0,0 +1,204 @@
|
||||
// Go MySQL Driver - A MySQL-Driver for Go's database/sql package
|
||||
//
|
||||
// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved.
|
||||
//
|
||||
// This Source Code Form is subject to the terms of the Mozilla Public
|
||||
// License, v. 2.0. If a copy of the MPL was not distributed with this file,
|
||||
// You can obtain one at http://mozilla.org/MPL/2.0/.
|
||||
|
||||
// +build go1.8
|
||||
|
||||
package mysql
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"database/sql/driver"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// Ping implements driver.Pinger interface
|
||||
func (mc *mysqlConn) Ping(ctx context.Context) error {
|
||||
if mc.closed.IsSet() {
|
||||
errLog.Print(ErrInvalidConn)
|
||||
return driver.ErrBadConn
|
||||
}
|
||||
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return err
|
||||
}
|
||||
defer mc.finish()
|
||||
|
||||
if err := mc.writeCommandPacket(comPing); err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := mc.readResultOK(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// BeginTx implements driver.ConnBeginTx interface
|
||||
func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) {
|
||||
if opts.ReadOnly {
|
||||
// TODO: support read-only transactions
|
||||
return nil, errors.New("mysql: read-only transactions not supported")
|
||||
}
|
||||
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defer mc.finish()
|
||||
|
||||
if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault {
|
||||
level, err := mapIsolationLevel(opts.Isolation)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return mc.Begin()
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) {
|
||||
dargs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rows, err := mc.query(query, dargs)
|
||||
if err != nil {
|
||||
mc.finish()
|
||||
return nil, err
|
||||
}
|
||||
rows.finish = mc.finish
|
||||
return rows, err
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) {
|
||||
dargs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer mc.finish()
|
||||
|
||||
return mc.Exec(query, dargs)
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) {
|
||||
if err := mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
stmt, err := mc.Prepare(query)
|
||||
mc.finish()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
select {
|
||||
default:
|
||||
case <-ctx.Done():
|
||||
stmt.Close()
|
||||
return nil, ctx.Err()
|
||||
}
|
||||
return stmt, nil
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) {
|
||||
dargs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := stmt.mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
rows, err := stmt.query(dargs)
|
||||
if err != nil {
|
||||
stmt.mc.finish()
|
||||
return nil, err
|
||||
}
|
||||
rows.finish = stmt.mc.finish
|
||||
return rows, err
|
||||
}
|
||||
|
||||
func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) {
|
||||
dargs, err := namedValueToValue(args)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := stmt.mc.watchCancel(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer stmt.mc.finish()
|
||||
|
||||
return stmt.Exec(dargs)
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) watchCancel(ctx context.Context) error {
|
||||
if mc.watching {
|
||||
// Reach here if canceled,
|
||||
// so the connection is already invalid
|
||||
mc.cleanup()
|
||||
return nil
|
||||
}
|
||||
if ctx.Done() == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
mc.watching = true
|
||||
select {
|
||||
default:
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
if mc.watcher == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
mc.watcher <- ctx
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (mc *mysqlConn) startWatcher() {
|
||||
watcher := make(chan mysqlContext, 1)
|
||||
mc.watcher = watcher
|
||||
finished := make(chan struct{})
|
||||
mc.finished = finished
|
||||
go func() {
|
||||
for {
|
||||
var ctx mysqlContext
|
||||
select {
|
||||
case ctx = <-watcher:
|
||||
case <-mc.closech:
|
||||
return
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
mc.cancel(ctx.Err())
|
||||
case <-finished:
|
||||
case <-mc.closech:
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
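The context plumbing added in connection_go18.go above is driven entirely from `database/sql`: when the caller's context ends, `watchCancel`/`startWatcher` kill the connection instead of letting the query hang. A minimal caller-side sketch, assuming a reachable MySQL server and a placeholder DSN:

```go
package main

import (
	"context"
	"database/sql"
	"log"
	"time"

	_ "github.com/go-sql-driver/mysql" // registers the "mysql" driver
)

func main() {
	// Placeholder DSN; adjust user, password, host and database for your setup.
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// If the query outlives the deadline, database/sql cancels the context and
	// the driver's watcher goroutine cancels the connection, so the call
	// returns an error instead of blocking.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var now string
	if err := db.QueryRowContext(ctx, "SELECT NOW()").Scan(&now); err != nil {
		log.Fatal(err) // e.g. a deadline error surfaced by the driver
	}
	log.Println("server time:", now)
}
```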
11
vendor/github.com/go-sql-driver/mysql/driver.go
generated
vendored
11
vendor/github.com/go-sql-driver/mysql/driver.go
generated
vendored
@@ -22,6 +22,11 @@ import (
	"net"
)

// watcher interface is used for context support (From Go 1.8)
type watcher interface {
	startWatcher()
}

// MySQLDriver is exported to make the driver directly accessible.
// In general the driver is used via the database/sql package.
type MySQLDriver struct{}
@@ -52,6 +57,7 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
	mc := &mysqlConn{
		maxAllowedPacket: maxPacketSize,
		maxWriteSize:     maxPacketSize - 1,
		closech:          make(chan struct{}),
	}
	mc.cfg, err = ParseDSN(dsn)
	if err != nil {
@@ -60,6 +66,11 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
	mc.parseTime = mc.cfg.ParseTime
	mc.strict = mc.cfg.Strict

	// Call startWatcher for context support (From Go 1.8)
	if s, ok := interface{}(mc).(watcher); ok {
		s.startWatcher()
	}

	// Connect to Server
	if dial, ok := dials[mc.cfg.Net]; ok {
		mc.netConn, err = dial(mc.cfg.Addr)

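The `interface{}(mc).(watcher)` check in `Open` is the standard optional-interface upgrade idiom: the extra capability is only used when the concrete type provides it (here, only under the go1.8 build tag). A self-contained sketch of the idiom, with illustrative type names:

```go
package main

import "fmt"

// watcher mirrors the optional capability checked for in Open above:
// a concrete type may or may not implement it.
type watcher interface {
	startWatcher()
}

type plainConn struct{}

type cancellableConn struct{}

func (cancellableConn) startWatcher() { fmt.Println("watcher started") }

// setup upgrades to the optional interface only when it is available,
// so a single code path serves both capability levels.
func setup(c interface{}) {
	if w, ok := c.(watcher); ok {
		w.startWatcher()
	}
}

func main() {
	setup(plainConn{})       // no-op: plainConn has no watcher
	setup(cancellableConn{}) // prints "watcher started"
}
```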
4
vendor/github.com/go-sql-driver/mysql/dsn.go
generated
vendored
4
vendor/github.com/go-sql-driver/mysql/dsn.go
generated
vendored
@@ -528,9 +528,7 @@ func parseDSNParams(cfg *Config, params string) (err error) {
				return fmt.Errorf("invalid value for TLS config name: %v", err)
			}

			if tlsConfig, ok := tlsConfigRegister[name]; ok {
				tlsConfig = cloneTLSConfig(tlsConfig)

			if tlsConfig := getTLSConfigClone(name); tlsConfig != nil {
				if len(tlsConfig.ServerName) == 0 && !tlsConfig.InsecureSkipVerify {
					host, _, err := net.SplitHostPort(cfg.Addr)
					if err == nil {

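The DSN parsing above now goes through `getTLSConfigClone`, which reads a config previously stored with `RegisterTLSConfig`. A short usage sketch; the CA file path, credentials and host are placeholders, and the `tls=<name>` DSN parameter is assumed to reference the registered name as in the driver's documentation.

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Build a custom TLS config from a CA bundle (path is illustrative).
	pem, err := ioutil.ReadFile("/etc/mysql/ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	roots := x509.NewCertPool()
	if !roots.AppendCertsFromPEM(pem) {
		log.Fatal("failed to parse CA certificate")
	}

	// Register the config under a name, then reference that name in the DSN.
	if err := mysql.RegisterTLSConfig("custom", &tls.Config{RootCAs: roots}); err != nil {
		log.Fatal(err)
	}

	db, err := sql.Open("mysql", "user:password@tcp(db.example.com:3306)/app?tls=custom")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```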
23
vendor/github.com/go-sql-driver/mysql/packets.go
generated
vendored
23
vendor/github.com/go-sql-driver/mysql/packets.go
generated
vendored
@@ -30,6 +30,9 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
|
||||
// read packet header
|
||||
data, err := mc.buf.readNext(4)
|
||||
if err != nil {
|
||||
if cerr := mc.canceled.Value(); cerr != nil {
|
||||
return nil, cerr
|
||||
}
|
||||
errLog.Print(err)
|
||||
mc.Close()
|
||||
return nil, driver.ErrBadConn
|
||||
@@ -63,6 +66,9 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
|
||||
// read packet body [pktLen bytes]
|
||||
data, err = mc.buf.readNext(pktLen)
|
||||
if err != nil {
|
||||
if cerr := mc.canceled.Value(); cerr != nil {
|
||||
return nil, cerr
|
||||
}
|
||||
errLog.Print(err)
|
||||
mc.Close()
|
||||
return nil, driver.ErrBadConn
|
||||
@@ -125,8 +131,13 @@ func (mc *mysqlConn) writePacket(data []byte) error {
|
||||
|
||||
// Handle error
|
||||
if err == nil { // n != len(data)
|
||||
mc.cleanup()
|
||||
errLog.Print(ErrMalformPkt)
|
||||
} else {
|
||||
if cerr := mc.canceled.Value(); cerr != nil {
|
||||
return cerr
|
||||
}
|
||||
mc.cleanup()
|
||||
errLog.Print(err)
|
||||
}
|
||||
return driver.ErrBadConn
|
||||
@@ -1067,17 +1078,19 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
|
||||
paramTypes[i+i] = fieldTypeString
|
||||
paramTypes[i+i+1] = 0x00
|
||||
|
||||
var val []byte
|
||||
var a [64]byte
|
||||
var b = a[:0]
|
||||
|
||||
if v.IsZero() {
|
||||
val = []byte("0000-00-00")
|
||||
b = append(b, "0000-00-00"...)
|
||||
} else {
|
||||
val = []byte(v.In(mc.cfg.Loc).Format(timeFormat))
|
||||
b = v.In(mc.cfg.Loc).AppendFormat(b, timeFormat)
|
||||
}
|
||||
|
||||
paramValues = appendLengthEncodedInteger(paramValues,
|
||||
uint64(len(val)),
|
||||
uint64(len(b)),
|
||||
)
|
||||
paramValues = append(paramValues, val...)
|
||||
paramValues = append(paramValues, b...)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("can not convert type: %T", arg)
|
||||
|
||||
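The packets.go change above swaps `time.Format` plus a byte-slice conversion for `time.AppendFormat` into a small fixed buffer, which avoids an intermediate string allocation per bound timestamp. A tiny standard-library sketch of the difference:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const timeFormat = "2006-01-02 15:04:05.999999"
	now := time.Now().UTC()

	// Allocating version: Format builds a string, then we copy it into a slice.
	allocated := []byte(now.Format(timeFormat))

	// Append version: reuse a small stack buffer, as the packet writer now does.
	var a [64]byte
	b := now.AppendFormat(a[:0], timeFormat)

	fmt.Println(string(allocated) == string(b)) // true: same bytes, fewer allocations
}
```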
26
vendor/github.com/go-sql-driver/mysql/rows.go
generated
vendored
26
vendor/github.com/go-sql-driver/mysql/rows.go
generated
vendored
@@ -28,8 +28,9 @@ type resultSet struct {
}

type mysqlRows struct {
	mc *mysqlConn
	rs resultSet
	mc     *mysqlConn
	rs     resultSet
	finish func()
}

type binaryRows struct {
@@ -65,12 +66,17 @@ func (rows *mysqlRows) Columns() []string {
}

func (rows *mysqlRows) Close() (err error) {
	if f := rows.finish; f != nil {
		f()
		rows.finish = nil
	}

	mc := rows.mc
	if mc == nil {
		return nil
	}
	if mc.netConn == nil {
		return ErrInvalidConn
	if err := mc.error(); err != nil {
		return err
	}

	// Remove unread packets from stream
@@ -98,8 +104,8 @@ func (rows *mysqlRows) nextResultSet() (int, error) {
	if rows.mc == nil {
		return 0, io.EOF
	}
	if rows.mc.netConn == nil {
		return 0, ErrInvalidConn
	if err := rows.mc.error(); err != nil {
		return 0, err
	}

	// Remove unread packets from stream
@@ -145,8 +151,8 @@ func (rows *binaryRows) NextResultSet() error {

func (rows *binaryRows) Next(dest []driver.Value) error {
	if mc := rows.mc; mc != nil {
		if mc.netConn == nil {
			return ErrInvalidConn
		if err := mc.error(); err != nil {
			return err
		}

		// Fetch next row from stream
@@ -167,8 +173,8 @@ func (rows *textRows) NextResultSet() (err error) {

func (rows *textRows) Next(dest []driver.Value) error {
	if mc := rows.mc; mc != nil {
		if mc.netConn == nil {
			return ErrInvalidConn
		if err := mc.error(); err != nil {
			return err
		}

		// Fetch next row from stream

10
vendor/github.com/go-sql-driver/mysql/statement.go
generated
vendored
10
vendor/github.com/go-sql-driver/mysql/statement.go
generated
vendored
@@ -23,7 +23,7 @@ type mysqlStmt struct {
}

func (stmt *mysqlStmt) Close() error {
	if stmt.mc == nil || stmt.mc.netConn == nil {
	if stmt.mc == nil || stmt.mc.closed.IsSet() {
		// driver.Stmt.Close can be called more than once, thus this function
		// has to be idempotent.
		// See also Issue #450 and golang/go#16019.
@@ -45,7 +45,7 @@ func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter {
}

func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
	if stmt.mc.netConn == nil {
	if stmt.mc.closed.IsSet() {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}
@@ -89,7 +89,11 @@ func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) {
}

func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) {
	if stmt.mc.netConn == nil {
	return stmt.query(args)
}

func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) {
	if stmt.mc.closed.IsSet() {
		errLog.Print(ErrInvalidConn)
		return nil, driver.ErrBadConn
	}

4
vendor/github.com/go-sql-driver/mysql/transaction.go
generated
vendored
4
vendor/github.com/go-sql-driver/mysql/transaction.go
generated
vendored
@@ -13,7 +13,7 @@ type mysqlTx struct {
}

func (tx *mysqlTx) Commit() (err error) {
	if tx.mc == nil || tx.mc.netConn == nil {
	if tx.mc == nil || tx.mc.closed.IsSet() {
		return ErrInvalidConn
	}
	err = tx.mc.exec("COMMIT")
@@ -22,7 +22,7 @@ func (tx *mysqlTx) Commit() (err error) {
}

func (tx *mysqlTx) Rollback() (err error) {
	if tx.mc == nil || tx.mc.netConn == nil {
	if tx.mc == nil || tx.mc.closed.IsSet() {
		return ErrInvalidConn
	}
	err = tx.mc.exec("ROLLBACK")

80
vendor/github.com/go-sql-driver/mysql/utils.go
generated
vendored
80
vendor/github.com/go-sql-driver/mysql/utils.go
generated
vendored
@@ -16,10 +16,13 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
tlsConfigLock sync.RWMutex
|
||||
tlsConfigRegister map[string]*tls.Config // Register for custom tls.Configs
|
||||
)
|
||||
|
||||
@@ -53,19 +56,32 @@ func RegisterTLSConfig(key string, config *tls.Config) error {
|
||||
return fmt.Errorf("key '%s' is reserved", key)
|
||||
}
|
||||
|
||||
tlsConfigLock.Lock()
|
||||
if tlsConfigRegister == nil {
|
||||
tlsConfigRegister = make(map[string]*tls.Config)
|
||||
}
|
||||
|
||||
tlsConfigRegister[key] = config
|
||||
tlsConfigLock.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeregisterTLSConfig removes the tls.Config associated with key.
|
||||
func DeregisterTLSConfig(key string) {
|
||||
tlsConfigLock.Lock()
|
||||
if tlsConfigRegister != nil {
|
||||
delete(tlsConfigRegister, key)
|
||||
}
|
||||
tlsConfigLock.Unlock()
|
||||
}
|
||||
|
||||
func getTLSConfigClone(key string) (config *tls.Config) {
|
||||
tlsConfigLock.RLock()
|
||||
if v, ok := tlsConfigRegister[key]; ok {
|
||||
config = cloneTLSConfig(v)
|
||||
}
|
||||
tlsConfigLock.RUnlock()
|
||||
return
|
||||
}
|
||||
|
||||
// Returns the bool value of the input.
|
||||
@@ -740,3 +756,67 @@ func escapeStringQuotes(buf []byte, v string) []byte {
|
||||
|
||||
return buf[:pos]
|
||||
}
|
||||
|
||||
/******************************************************************************
|
||||
* Sync utils *
|
||||
******************************************************************************/
|
||||
|
||||
// noCopy may be embedded into structs which must not be copied
|
||||
// after the first use.
|
||||
//
|
||||
// See https://github.com/golang/go/issues/8005#issuecomment-190753527
|
||||
// for details.
|
||||
type noCopy struct{}
|
||||
|
||||
// Lock is a no-op used by -copylocks checker from `go vet`.
|
||||
func (*noCopy) Lock() {}
|
||||
|
||||
// atomicBool is a wrapper around uint32 for usage as a boolean value with
|
||||
// atomic access.
|
||||
type atomicBool struct {
|
||||
_noCopy noCopy
|
||||
value uint32
|
||||
}
|
||||
|
||||
// IsSet returns wether the current boolean value is true
|
||||
func (ab *atomicBool) IsSet() bool {
|
||||
return atomic.LoadUint32(&ab.value) > 0
|
||||
}
|
||||
|
||||
// Set sets the value of the bool regardless of the previous value
|
||||
func (ab *atomicBool) Set(value bool) {
|
||||
if value {
|
||||
atomic.StoreUint32(&ab.value, 1)
|
||||
} else {
|
||||
atomic.StoreUint32(&ab.value, 0)
|
||||
}
|
||||
}
|
||||
|
||||
// TrySet sets the value of the bool and returns wether the value changed
|
||||
func (ab *atomicBool) TrySet(value bool) bool {
|
||||
if value {
|
||||
return atomic.SwapUint32(&ab.value, 1) == 0
|
||||
}
|
||||
return atomic.SwapUint32(&ab.value, 0) > 0
|
||||
}
|
||||
|
||||
// atomicBool is a wrapper for atomically accessed error values
|
||||
type atomicError struct {
|
||||
_noCopy noCopy
|
||||
value atomic.Value
|
||||
}
|
||||
|
||||
// Set sets the error value regardless of the previous value.
|
||||
// The value must not be nil
|
||||
func (ae *atomicError) Set(value error) {
|
||||
ae.value.Store(value)
|
||||
}
|
||||
|
||||
// Value returns the current error value
|
||||
func (ae *atomicError) Value() error {
|
||||
if v := ae.value.Load(); v != nil {
|
||||
// this will panic if the value doesn't implement the error interface
|
||||
return v.(error)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
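The new sync utilities in utils.go (`atomicBool`, `atomicError`) replace the old `netConn == nil` checks with race-free state flags. A minimal sketch of the same idea built directly on `sync/atomic`; the `flag` type here is an illustration, not the driver's exported API.

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// flag is an illustrative re-implementation of the atomicBool idea:
// a uint32 accessed only through sync/atomic, safe for concurrent use.
type flag struct{ v uint32 }

func (f *flag) IsSet() bool { return atomic.LoadUint32(&f.v) > 0 }

func (f *flag) Set(val bool) {
	if val {
		atomic.StoreUint32(&f.v, 1)
		return
	}
	atomic.StoreUint32(&f.v, 0)
}

// TrySet flips the flag and reports whether this call changed it,
// which is what makes "run teardown exactly once" checks cheap.
func (f *flag) TrySet(val bool) bool {
	if val {
		return atomic.SwapUint32(&f.v, 1) == 0
	}
	return atomic.SwapUint32(&f.v, 0) > 0
}

func main() {
	var closed flag
	fmt.Println(closed.TrySet(true)) // true: this call closed it
	fmt.Println(closed.TrySet(true)) // false: already closed
	fmt.Println(closed.IsSet())      // true
}
```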
34
vendor/github.com/go-sql-driver/mysql/utils_go18.go
generated
vendored
34
vendor/github.com/go-sql-driver/mysql/utils_go18.go
generated
vendored
@@ -10,8 +10,40 @@

package mysql

import "crypto/tls"
import (
	"crypto/tls"
	"database/sql"
	"database/sql/driver"
	"errors"
)

func cloneTLSConfig(c *tls.Config) *tls.Config {
	return c.Clone()
}

func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) {
	dargs := make([]driver.Value, len(named))
	for n, param := range named {
		if len(param.Name) > 0 {
			// TODO: support the use of Named Parameters #561
			return nil, errors.New("mysql: driver does not support the use of Named Parameters")
		}
		dargs[n] = param.Value
	}
	return dargs, nil
}

func mapIsolationLevel(level driver.IsolationLevel) (string, error) {
	switch sql.IsolationLevel(level) {
	case sql.LevelRepeatableRead:
		return "REPEATABLE READ", nil
	case sql.LevelReadCommitted:
		return "READ COMMITTED", nil
	case sql.LevelReadUncommitted:
		return "READ UNCOMMITTED", nil
	case sql.LevelSerializable:
		return "SERIALIZABLE", nil
	default:
		return "", errors.New("mysql: unsupported isolation level: " + string(level))
	}
}

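`mapIsolationLevel` above is reached through `database/sql`'s `BeginTx`: the isolation level passed in `sql.TxOptions` is translated into a `SET TRANSACTION ISOLATION LEVEL` statement before `BEGIN`. A caller-side sketch; the DSN and the `accounts` table are placeholders.

```go
package main

import (
	"context"
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	db, err := sql.Open("mysql", "user:password@tcp(127.0.0.1:3306)/test")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// The isolation level chosen here flows through driver.TxOptions into
	// the driver's mapIsolationLevel before the transaction begins.
	tx, err := db.BeginTx(context.Background(), &sql.TxOptions{
		Isolation: sql.LevelRepeatableRead,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer tx.Rollback()

	if _, err := tx.Exec("UPDATE accounts SET balance = balance - 1 WHERE id = ?", 42); err != nil {
		log.Fatal(err)
	}
	if err := tx.Commit(); err != nil {
		log.Fatal(err)
	}
}
```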
1
vendor/github.com/gocql/gocql/AUTHORS
generated
vendored
1
vendor/github.com/gocql/gocql/AUTHORS
generated
vendored
@@ -88,3 +88,4 @@ Jesse Claven <jesse.claven@gmail.com>
Derrick Wippler <thrawn01@gmail.com>
Leigh McCulloch <leigh@leighmcculloch.com>
Ron Kuris <swcafe@gmail.com>
Raphael Gavache <raphael.gavache@gmail.com>

1
vendor/github.com/gocql/gocql/query_executor.go
generated
vendored
1
vendor/github.com/gocql/gocql/query_executor.go
generated
vendored
@@ -48,6 +48,7 @@ func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) {

		// Exit for loop if the query was successful
		if iter.err == nil {
			iter.host = host
			return iter, nil
		}

10
vendor/github.com/googleapis/gax-go/call_option.go
generated
vendored
10
vendor/github.com/googleapis/gax-go/call_option.go
generated
vendored
@@ -35,6 +35,7 @@ import (

	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// CallOption is an option used by Invoke to control behaviors of RPC calls.
@@ -80,7 +81,11 @@ type boRetryer struct {
}

func (r *boRetryer) Retry(err error) (time.Duration, bool) {
	c := grpc.Code(err)
	st, ok := status.FromError(err)
	if !ok {
		return 0, false
	}
	c := st.Code()
	for _, rc := range r.codes {
		if c == rc {
			return r.backoff.Pause(), true
@@ -121,6 +126,9 @@ func (bo *Backoff) Pause() time.Duration {
	if bo.Multiplier < 1 {
		bo.Multiplier = 2
	}
	// Select a duration between zero and the current max. It might seem counterintuitive to
	// have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html
	// argues that that is the best strategy.
	d := time.Duration(rand.Int63n(int64(bo.cur)))
	bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
	if bo.cur > bo.Max {

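The gax-go change above moves retry classification from the deprecated `grpc.Code(err)` to `status.FromError`, which also handles wrapped status errors. A small sketch of the same status-based check in isolation; the helper name is ours.

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// retryable reports whether err carries one of the given gRPC codes,
// mirroring the status-based check boRetryer now performs.
func retryable(err error, retryCodes ...codes.Code) bool {
	st, ok := status.FromError(err)
	if !ok {
		return false // not a gRPC status error
	}
	for _, rc := range retryCodes {
		if st.Code() == rc {
			return true
		}
	}
	return false
}

func main() {
	err := status.Error(codes.Unavailable, "backend overloaded")
	fmt.Println(retryable(err, codes.Unavailable, codes.DeadlineExceeded)) // true
	fmt.Println(retryable(err, codes.NotFound))                            // false
}
```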
24
vendor/github.com/hashicorp/consul/api/agent.go
generated
vendored
24
vendor/github.com/hashicorp/consul/api/agent.go
generated
vendored
@@ -67,17 +67,19 @@ type AgentCheckRegistration struct {

// AgentServiceCheck is used to define a node or service level check
type AgentServiceCheck struct {
	Script            string `json:",omitempty"`
	DockerContainerID string `json:",omitempty"`
	Shell             string `json:",omitempty"` // Only supported for Docker.
	Interval          string `json:",omitempty"`
	Timeout           string `json:",omitempty"`
	TTL               string `json:",omitempty"`
	HTTP              string `json:",omitempty"`
	TCP               string `json:",omitempty"`
	Status            string `json:",omitempty"`
	Notes             string `json:",omitempty"`
	TLSSkipVerify     bool   `json:",omitempty"`
	Script            string              `json:",omitempty"`
	DockerContainerID string              `json:",omitempty"`
	Shell             string              `json:",omitempty"` // Only supported for Docker.
	Interval          string              `json:",omitempty"`
	Timeout           string              `json:",omitempty"`
	TTL               string              `json:",omitempty"`
	HTTP              string              `json:",omitempty"`
	Header            map[string][]string `json:",omitempty"`
	Method            string              `json:",omitempty"`
	TCP               string              `json:",omitempty"`
	Status            string              `json:",omitempty"`
	Notes             string              `json:",omitempty"`
	TLSSkipVerify     bool                `json:",omitempty"`

	// In Consul 0.7 and later, checks that are associated with a service
	// may also contain this optional DeregisterCriticalServiceAfter field,

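The new `Header` and `Method` fields let an HTTP health check send a custom method and extra headers. A registration sketch against the Consul API client; the service name, port and health URL are placeholders.

```go
package main

import (
	"log"

	"github.com/hashicorp/consul/api"
)

func main() {
	client, err := api.NewClient(api.DefaultConfig())
	if err != nil {
		log.Fatal(err)
	}

	// Service name, port and check URL are illustrative.
	reg := &api.AgentServiceRegistration{
		Name: "web",
		Port: 8080,
		Check: &api.AgentServiceCheck{
			HTTP:     "http://127.0.0.1:8080/health",
			Method:   "HEAD",                                     // new field in this version
			Header:   map[string][]string{"X-Probe": {"consul"}}, // new field in this version
			Interval: "10s",
			Timeout:  "2s",
		},
	}
	if err := client.Agent().ServiceRegister(reg); err != nil {
		log.Fatal(err)
	}
}
```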
36
vendor/github.com/keybase/go-crypto/openpgp/keys.go
generated
vendored
36
vendor/github.com/keybase/go-crypto/openpgp/keys.go
generated
vendored
@@ -72,6 +72,7 @@ type Key struct {
|
||||
PublicKey *packet.PublicKey
|
||||
PrivateKey *packet.PrivateKey
|
||||
SelfSignature *packet.Signature
|
||||
KeyFlags packet.KeyFlagBits
|
||||
}
|
||||
|
||||
// A KeyRing provides access to public and private keys.
|
||||
@@ -145,7 +146,7 @@ func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
|
||||
|
||||
if candidateSubkey != -1 {
|
||||
subkey := e.Subkeys[candidateSubkey]
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Sig.GetKeyFlags()}, true
|
||||
}
|
||||
|
||||
// If we don't have any candidate subkeys for encryption and
|
||||
@@ -159,7 +160,7 @@ func (e *Entity) encryptionKey(now time.Time) (Key, bool) {
|
||||
if (!i.SelfSignature.FlagsValid || i.SelfSignature.FlagEncryptCommunications) &&
|
||||
e.PrimaryKey.PubKeyAlgo.CanEncrypt() &&
|
||||
!i.SelfSignature.KeyExpired(now) {
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, i.SelfSignature.GetKeyFlags()}, true
|
||||
}
|
||||
|
||||
// This Entity appears to be signing only.
|
||||
@@ -184,7 +185,7 @@ func (e *Entity) signingKey(now time.Time) (Key, bool) {
|
||||
|
||||
if candidateSubkey != -1 {
|
||||
subkey := e.Subkeys[candidateSubkey]
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig}, true
|
||||
return Key{e, subkey.PublicKey, subkey.PrivateKey, subkey.Sig, subkey.Sig.GetKeyFlags()}, true
|
||||
}
|
||||
|
||||
// If we have no candidate subkey then we assume that it's ok to sign
|
||||
@@ -194,7 +195,7 @@ func (e *Entity) signingKey(now time.Time) (Key, bool) {
|
||||
e.PrimaryKey.PubKeyAlgo.CanSign() &&
|
||||
!i.SelfSignature.KeyExpired(now) &&
|
||||
e.PrivateKey.PrivateKey != nil {
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature}, true
|
||||
return Key{e, e.PrimaryKey, e.PrivateKey, i.SelfSignature, i.SelfSignature.GetKeyFlags()}, true
|
||||
}
|
||||
|
||||
return Key{}, false
|
||||
@@ -229,7 +230,13 @@ func (el EntityList) KeysById(id uint64, fp []byte) (keys []Key) {
|
||||
break
|
||||
}
|
||||
}
|
||||
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig})
|
||||
|
||||
var keyFlags packet.KeyFlagBits
|
||||
for _, ident := range e.Identities {
|
||||
keyFlags.Merge(ident.SelfSignature.GetKeyFlags())
|
||||
}
|
||||
|
||||
keys = append(keys, Key{e, e.PrimaryKey, e.PrivateKey, selfSig, keyFlags})
|
||||
}
|
||||
|
||||
for _, subKey := range e.Subkeys {
|
||||
@@ -242,7 +249,7 @@ func (el EntityList) KeysById(id uint64, fp []byte) (keys []Key) {
|
||||
sig = subKey.Sig
|
||||
}
|
||||
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, sig})
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, sig, sig.GetKeyFlags()})
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -269,19 +276,8 @@ func (el EntityList) KeysByIdUsage(id uint64, fp []byte, requiredUsage byte) (ke
|
||||
var usage byte
|
||||
|
||||
switch {
|
||||
case key.SelfSignature.FlagsValid:
|
||||
if key.SelfSignature.FlagCertify {
|
||||
usage |= packet.KeyFlagCertify
|
||||
}
|
||||
if key.SelfSignature.FlagSign {
|
||||
usage |= packet.KeyFlagSign
|
||||
}
|
||||
if key.SelfSignature.FlagEncryptCommunications {
|
||||
usage |= packet.KeyFlagEncryptCommunications
|
||||
}
|
||||
if key.SelfSignature.FlagEncryptStorage {
|
||||
usage |= packet.KeyFlagEncryptStorage
|
||||
}
|
||||
case key.KeyFlags.Valid:
|
||||
usage = key.KeyFlags.BitField
|
||||
|
||||
case key.PublicKey.PubKeyAlgo == packet.PubKeyAlgoElGamal:
|
||||
// We also need to handle the case where, although the sig's
|
||||
@@ -319,7 +315,7 @@ func (el EntityList) DecryptionKeys() (keys []Key) {
|
||||
for _, e := range el {
|
||||
for _, subKey := range e.Subkeys {
|
||||
if subKey.PrivateKey != nil && subKey.PrivateKey.PrivateKey != nil && (!subKey.Sig.FlagsValid || subKey.Sig.FlagEncryptStorage || subKey.Sig.FlagEncryptCommunications) {
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig})
|
||||
keys = append(keys, Key{e, subKey.PublicKey, subKey.PrivateKey, subKey.Sig, subKey.Sig.GetKeyFlags()})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
2
vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go
generated
vendored
2
vendor/github.com/keybase/go-crypto/openpgp/packet/public_key.go
generated
vendored
@@ -66,7 +66,7 @@ type edDSAkey struct {
|
||||
|
||||
func copyFrontFill(dst, src []byte, length int) int {
|
||||
if srcLen := len(src); srcLen < length {
|
||||
return copy(dst[length - srcLen:], src[:])
|
||||
return copy(dst[length-srcLen:], src[:])
|
||||
} else {
|
||||
return copy(dst[:], src[:])
|
||||
}
|
||||
|
||||
66
vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go
generated
vendored
66
vendor/github.com/keybase/go-crypto/openpgp/packet/signature.go
generated
vendored
@@ -44,6 +44,13 @@ type RevocationKey struct {
|
||||
Fingerprint []byte
|
||||
}
|
||||
|
||||
// KeyFlagBits holds boolean whether any usage flags were provided in
|
||||
// the signature and BitField with KeyFlag* flags.
|
||||
type KeyFlagBits struct {
|
||||
Valid bool
|
||||
BitField byte
|
||||
}
|
||||
|
||||
// Signature represents a signature. See RFC 4880, section 5.2.
|
||||
type Signature struct {
|
||||
SigType SignatureType
|
||||
@@ -798,20 +805,7 @@ func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) {
|
||||
// Key flags may only appear in self-signatures or certification signatures.
|
||||
|
||||
if sig.FlagsValid {
|
||||
var flags byte
|
||||
if sig.FlagCertify {
|
||||
flags |= KeyFlagCertify
|
||||
}
|
||||
if sig.FlagSign {
|
||||
flags |= KeyFlagSign
|
||||
}
|
||||
if sig.FlagEncryptCommunications {
|
||||
flags |= KeyFlagEncryptCommunications
|
||||
}
|
||||
if sig.FlagEncryptStorage {
|
||||
flags |= KeyFlagEncryptStorage
|
||||
}
|
||||
subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{flags}})
|
||||
subpackets = append(subpackets, outputSubpacket{true, keyFlagsSubpacket, false, []byte{sig.GetKeyFlags().BitField}})
|
||||
}
|
||||
|
||||
// The following subpackets may only appear in self-signatures
|
||||
@@ -840,3 +834,47 @@ func (sig *Signature) buildSubpackets() (subpackets []outputSubpacket) {
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func (sig *Signature) GetKeyFlags() (ret KeyFlagBits) {
|
||||
if !sig.FlagsValid {
|
||||
return ret
|
||||
}
|
||||
|
||||
ret.Valid = true
|
||||
if sig.FlagCertify {
|
||||
ret.BitField |= KeyFlagCertify
|
||||
}
|
||||
if sig.FlagSign {
|
||||
ret.BitField |= KeyFlagSign
|
||||
}
|
||||
if sig.FlagEncryptCommunications {
|
||||
ret.BitField |= KeyFlagEncryptCommunications
|
||||
}
|
||||
if sig.FlagEncryptStorage {
|
||||
ret.BitField |= KeyFlagEncryptStorage
|
||||
}
|
||||
return ret
|
||||
}
|
||||
|
||||
func (f *KeyFlagBits) HasFlagCertify() bool {
|
||||
return f.BitField&KeyFlagCertify != 0
|
||||
}
|
||||
|
||||
func (f *KeyFlagBits) HasFlagSign() bool {
|
||||
return f.BitField&KeyFlagSign != 0
|
||||
}
|
||||
|
||||
func (f *KeyFlagBits) HasFlagEncryptCommunications() bool {
|
||||
return f.BitField&KeyFlagEncryptCommunications != 0
|
||||
}
|
||||
|
||||
func (f *KeyFlagBits) HasFlagEncryptStorage() bool {
|
||||
return f.BitField&KeyFlagEncryptStorage != 0
|
||||
}
|
||||
|
||||
func (f *KeyFlagBits) Merge(other KeyFlagBits) {
|
||||
if other.Valid {
|
||||
f.Valid = true
|
||||
f.BitField |= other.BitField
|
||||
}
|
||||
}
|
||||
|
||||
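The keybase openpgp changes above add `KeyFlagBits` to `Key` and expose `GetKeyFlags`, `Merge` and the `HasFlag*` helpers so callers can reason about key usage without re-deriving it from the self-signature. A short sketch using only the types and constants shown in this diff; the two flag values being merged stand in for hypothetical per-identity self-signatures.

```go
package main

import (
	"fmt"

	"github.com/keybase/go-crypto/openpgp/packet"
)

func main() {
	// Fold the usage flags carried by two (hypothetical) self-signatures into
	// one value, the way KeysById now merges every identity's flags.
	var merged packet.KeyFlagBits

	perIdentity := []packet.KeyFlagBits{
		{Valid: true, BitField: packet.KeyFlagSign | packet.KeyFlagCertify},
		{Valid: true, BitField: packet.KeyFlagEncryptCommunications},
	}
	for _, f := range perIdentity {
		merged.Merge(f)
	}

	fmt.Println(merged.Valid)                          // true
	fmt.Println(merged.HasFlagSign())                  // true
	fmt.Println(merged.HasFlagEncryptCommunications()) // true
	fmt.Println(merged.HasFlagEncryptStorage())        // false
}
```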
2
vendor/github.com/mattn/go-colorable/colorable_appengine.go
generated
vendored
2
vendor/github.com/mattn/go-colorable/colorable_appengine.go
generated
vendored
@@ -5,6 +5,8 @@ package colorable
import (
	"io"
	"os"

	_ "github.com/mattn/go-isatty"
)

// NewColorable return new instance of Writer which handle escape sequence.

2
vendor/github.com/mattn/go-colorable/colorable_others.go
generated
vendored
2
vendor/github.com/mattn/go-colorable/colorable_others.go
generated
vendored
@@ -6,6 +6,8 @@ package colorable
import (
	"io"
	"os"

	_ "github.com/mattn/go-isatty"
)

// NewColorable return new instance of Writer which handle escape sequence.

32
vendor/github.com/opencontainers/go-digest/algorithm.go
generated
vendored
32
vendor/github.com/opencontainers/go-digest/algorithm.go
generated
vendored
@@ -19,6 +19,7 @@ import (
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// Algorithm identifies and implementation of a digester by an identifier.
|
||||
@@ -28,9 +29,9 @@ type Algorithm string
|
||||
|
||||
// supported digest types
|
||||
const (
|
||||
SHA256 Algorithm = "sha256" // sha256 with hex encoding
|
||||
SHA384 Algorithm = "sha384" // sha384 with hex encoding
|
||||
SHA512 Algorithm = "sha512" // sha512 with hex encoding
|
||||
SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
|
||||
SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
|
||||
SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
|
||||
|
||||
// Canonical is the primary digest algorithm used with the distribution
|
||||
// project. Other digests may be used but this one is the primary storage
|
||||
@@ -50,6 +51,14 @@ var (
|
||||
SHA384: crypto.SHA384,
|
||||
SHA512: crypto.SHA512,
|
||||
}
|
||||
|
||||
// anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
|
||||
// Note that /A-F/ disallowed.
|
||||
anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
|
||||
SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
|
||||
SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
|
||||
SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
|
||||
}
|
||||
)
|
||||
|
||||
// Available returns true if the digest type is available for use. If this
|
||||
@@ -164,3 +173,20 @@ func (a Algorithm) FromBytes(p []byte) Digest {
|
||||
func (a Algorithm) FromString(s string) Digest {
|
||||
return a.FromBytes([]byte(s))
|
||||
}
|
||||
|
||||
// Validate validates the encoded portion string
|
||||
func (a Algorithm) Validate(encoded string) error {
|
||||
r, ok := anchoredEncodedRegexps[a]
|
||||
if !ok {
|
||||
return ErrDigestUnsupported
|
||||
}
|
||||
// Digests much always be hex-encoded, ensuring that their hex portion will
|
||||
// always be size*2
|
||||
if a.Size()*2 != len(encoded) {
|
||||
return ErrDigestInvalidLength
|
||||
}
|
||||
if r.MatchString(encoded) {
|
||||
return nil
|
||||
}
|
||||
return ErrDigestInvalidFormat
|
||||
}
|
||||
|
||||
20
vendor/github.com/opencontainers/go-digest/digest.go
generated
vendored
20
vendor/github.com/opencontainers/go-digest/digest.go
generated
vendored
@@ -101,26 +101,18 @@ func FromString(s string) Digest {
// error if not.
func (d Digest) Validate() error {
	s := string(d)

	i := strings.Index(s, ":")

	// validate i then run through regexp
	if i < 0 || i+1 == len(s) || !DigestRegexpAnchored.MatchString(s) {
	if i <= 0 || i+1 == len(s) {
		return ErrDigestInvalidFormat
	}

	algorithm := Algorithm(s[:i])
	algorithm, encoded := Algorithm(s[:i]), s[i+1:]
	if !algorithm.Available() {
		if !DigestRegexpAnchored.MatchString(s) {
			return ErrDigestInvalidFormat
		}
		return ErrDigestUnsupported
	}

	// Digests much always be hex-encoded, ensuring that their hex portion will
	// always be size*2
	if algorithm.Size()*2 != len(s[i+1:]) {
		return ErrDigestInvalidLength
	}

	return nil
	return algorithm.Validate(encoded)
}

// Algorithm returns the algorithm portion of the digest. This will panic if

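With this change, `Digest.Validate` splits the string into `algorithm:encoded` and delegates the encoded-part check to `Algorithm.Validate`, so wrong length or non-lowercase-hex input is rejected up front. A small usage sketch against the go-digest package:

```go
package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

func main() {
	// Compute a digest and round-trip it through the string form.
	d := digest.SHA256.FromString("hello world")
	fmt.Println(d) // sha256:b94d27b9...

	// Validate now defers the encoded-part check to Algorithm.Validate,
	// so bad hex or a bad length is caught early.
	if err := d.Validate(); err != nil {
		fmt.Println("unexpected:", err)
	}

	bad := digest.Digest("sha256:NOT-HEX")
	fmt.Println(bad.Validate()) // a length or format error, never nil
}
```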
262
vendor/github.com/opencontainers/runc/libcontainer/README.md
generated
vendored
262
vendor/github.com/opencontainers/runc/libcontainer/README.md
generated
vendored
@@ -1,262 +0,0 @@
|
||||
# libcontainer
|
||||
|
||||
[](https://godoc.org/github.com/opencontainers/runc/libcontainer)
|
||||
|
||||
Libcontainer provides a native Go implementation for creating containers
|
||||
with namespaces, cgroups, capabilities, and filesystem access controls.
|
||||
It allows you to manage the lifecycle of the container performing additional operations
|
||||
after the container is created.
|
||||
|
||||
|
||||
#### Container
|
||||
A container is a self contained execution environment that shares the kernel of the
|
||||
host system and which is (optionally) isolated from other containers in the system.
|
||||
|
||||
#### Using libcontainer
|
||||
|
||||
Because containers are spawned in a two step process you will need a binary that
|
||||
will be executed as the init process for the container. In libcontainer, we use
|
||||
the current binary (/proc/self/exe) to be executed as the init process, and use
|
||||
arg "init", we call the first step process "bootstrap", so you always need a "init"
|
||||
function as the entry of "bootstrap".
|
||||
|
||||
In addition to the go init function the early stage bootstrap is handled by importing
|
||||
[nsenter](https://github.com/opencontainers/runc/blob/master/libcontainer/nsenter/README.md).
|
||||
|
||||
```go
|
||||
import (
|
||||
_ "github.com/opencontainers/runc/libcontainer/nsenter"
|
||||
)
|
||||
|
||||
func init() {
|
||||
if len(os.Args) > 1 && os.Args[1] == "init" {
|
||||
runtime.GOMAXPROCS(1)
|
||||
runtime.LockOSThread()
|
||||
factory, _ := libcontainer.New("")
|
||||
if err := factory.StartInitialization(); err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
panic("--this line should have never been executed, congratulations--")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then to create a container you first have to initialize an instance of a factory
|
||||
that will handle the creation and initialization for a container.
|
||||
|
||||
```go
|
||||
factory, err := libcontainer.New("/var/lib/container", libcontainer.Cgroupfs, libcontainer.InitArgs(os.Args[0], "init"))
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
return
|
||||
}
|
||||
```
|
||||
|
||||
Once you have an instance of the factory created we can create a configuration
|
||||
struct describing how the container is to be created. A sample would look similar to this:
|
||||
|
||||
```go
|
||||
defaultMountFlags := unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV
|
||||
config := &configs.Config{
|
||||
Rootfs: "/your/path/to/rootfs",
|
||||
Capabilities: []string{
|
||||
"CAP_CHOWN",
|
||||
"CAP_DAC_OVERRIDE",
|
||||
"CAP_FSETID",
|
||||
"CAP_FOWNER",
|
||||
"CAP_MKNOD",
|
||||
"CAP_NET_RAW",
|
||||
"CAP_SETGID",
|
||||
"CAP_SETUID",
|
||||
"CAP_SETFCAP",
|
||||
"CAP_SETPCAP",
|
||||
"CAP_NET_BIND_SERVICE",
|
||||
"CAP_SYS_CHROOT",
|
||||
"CAP_KILL",
|
||||
"CAP_AUDIT_WRITE",
|
||||
},
|
||||
Namespaces: configs.Namespaces([]configs.Namespace{
|
||||
{Type: configs.NEWNS},
|
||||
{Type: configs.NEWUTS},
|
||||
{Type: configs.NEWIPC},
|
||||
{Type: configs.NEWPID},
|
||||
{Type: configs.NEWUSER},
|
||||
{Type: configs.NEWNET},
|
||||
}),
|
||||
Cgroups: &configs.Cgroup{
|
||||
Name: "test-container",
|
||||
Parent: "system",
|
||||
Resources: &configs.Resources{
|
||||
MemorySwappiness: nil,
|
||||
AllowAllDevices: nil,
|
||||
AllowedDevices: configs.DefaultAllowedDevices,
|
||||
},
|
||||
},
|
||||
MaskPaths: []string{
|
||||
"/proc/kcore",
|
||||
"/sys/firmware",
|
||||
},
|
||||
ReadonlyPaths: []string{
|
||||
"/proc/sys", "/proc/sysrq-trigger", "/proc/irq", "/proc/bus",
|
||||
},
|
||||
Devices: configs.DefaultAutoCreatedDevices,
|
||||
Hostname: "testing",
|
||||
Mounts: []*configs.Mount{
|
||||
{
|
||||
Source: "proc",
|
||||
Destination: "/proc",
|
||||
Device: "proc",
|
||||
Flags: defaultMountFlags,
|
||||
},
|
||||
{
|
||||
Source: "tmpfs",
|
||||
Destination: "/dev",
|
||||
Device: "tmpfs",
|
||||
Flags: unix.MS_NOSUID | unix.MS_STRICTATIME,
|
||||
Data: "mode=755",
|
||||
},
|
||||
{
|
||||
Source: "devpts",
|
||||
Destination: "/dev/pts",
|
||||
Device: "devpts",
|
||||
Flags: unix.MS_NOSUID | unix.MS_NOEXEC,
|
||||
Data: "newinstance,ptmxmode=0666,mode=0620,gid=5",
|
||||
},
|
||||
{
|
||||
Device: "tmpfs",
|
||||
Source: "shm",
|
||||
Destination: "/dev/shm",
|
||||
Data: "mode=1777,size=65536k",
|
||||
Flags: defaultMountFlags,
|
||||
},
|
||||
{
|
||||
Source: "mqueue",
|
||||
Destination: "/dev/mqueue",
|
||||
Device: "mqueue",
|
||||
Flags: defaultMountFlags,
|
||||
},
|
||||
{
|
||||
Source: "sysfs",
|
||||
Destination: "/sys",
|
||||
Device: "sysfs",
|
||||
Flags: defaultMountFlags | unix.MS_RDONLY,
|
||||
},
|
||||
},
|
||||
UidMappings: []configs.IDMap{
|
||||
{
|
||||
ContainerID: 0,
|
||||
HostID: 1000,
|
||||
Size: 65536,
|
||||
},
|
||||
},
|
||||
GidMappings: []configs.IDMap{
|
||||
{
|
||||
ContainerID: 0,
|
||||
HostID: 1000,
|
||||
Size: 65536,
|
||||
},
|
||||
},
|
||||
Networks: []*configs.Network{
|
||||
{
|
||||
Type: "loopback",
|
||||
Address: "127.0.0.1/0",
|
||||
Gateway: "localhost",
|
||||
},
|
||||
},
|
||||
Rlimits: []configs.Rlimit{
|
||||
{
|
||||
Type: unix.RLIMIT_NOFILE,
|
||||
Hard: uint64(1025),
|
||||
Soft: uint64(1025),
|
||||
},
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
Once you have the configuration populated you can create a container:
|
||||
|
||||
```go
|
||||
container, err := factory.Create("container-id", config)
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
return
|
||||
}
|
||||
```
|
||||
|
||||
To spawn bash as the initial process inside the container and have the
|
||||
processes pid returned in order to wait, signal, or kill the process:
|
||||
|
||||
```go
|
||||
process := &libcontainer.Process{
|
||||
Args: []string{"/bin/bash"},
|
||||
Env: []string{"PATH=/bin"},
|
||||
User: "daemon",
|
||||
Stdin: os.Stdin,
|
||||
Stdout: os.Stdout,
|
||||
Stderr: os.Stderr,
|
||||
}
|
||||
|
||||
err := container.Run(process)
|
||||
if err != nil {
|
||||
container.Destroy()
|
||||
logrus.Fatal(err)
|
||||
return
|
||||
}
|
||||
|
||||
// wait for the process to finish.
|
||||
_, err := process.Wait()
|
||||
if err != nil {
|
||||
logrus.Fatal(err)
|
||||
}
|
||||
|
||||
// destroy the container.
|
||||
container.Destroy()
|
||||
```
|
||||
|
||||
Additional ways to interact with a running container are:
|
||||
|
||||
```go
|
||||
// return all the pids for all processes running inside the container.
|
||||
processes, err := container.Processes()
|
||||
|
||||
// get detailed cpu, memory, io, and network statistics for the container and
|
||||
// it's processes.
|
||||
stats, err := container.Stats()
|
||||
|
||||
// pause all processes inside the container.
|
||||
container.Pause()
|
||||
|
||||
// resume all paused processes.
|
||||
container.Resume()
|
||||
|
||||
// send signal to container's init process.
|
||||
container.Signal(signal)
|
||||
|
||||
// update container resource constraints.
|
||||
container.Set(config)
|
||||
|
||||
// get current status of the container.
|
||||
status, err := container.Status()
|
||||
|
||||
// get current container's state information.
|
||||
state, err := container.State()
|
||||
```
|
||||
|
||||
|
||||
#### Checkpoint & Restore
|
||||
|
||||
libcontainer now integrates [CRIU](http://criu.org/) for checkpointing and restoring containers.
|
||||
This let's you save the state of a process running inside a container to disk, and then restore
|
||||
that state into a new process, on the same machine or on another machine.
|
||||
|
||||
`criu` version 1.5.2 or higher is required to use checkpoint and restore.
|
||||
If you don't already have `criu` installed, you can build it from source, following the
|
||||
[online instructions](http://criu.org/Installation). `criu` is also installed in the docker image
|
||||
generated when building libcontainer with docker.
|
||||
|
||||
|
||||
## Copyright and license
|
||||
|
||||
Code and documentation copyright 2014 Docker, inc. Code released under the Apache 2.0 license.
|
||||
Docs released under Creative commons.
|
||||
|
||||
334
vendor/github.com/opencontainers/runc/libcontainer/SPEC.md
generated
vendored
334
vendor/github.com/opencontainers/runc/libcontainer/SPEC.md
generated
vendored
@@ -1,334 +0,0 @@
|
||||
## Container Specification - v1
|
||||
|
||||
This is the standard configuration for version 1 containers. It includes
|
||||
namespaces, standard filesystem setup, a default Linux capability set, and
|
||||
information about resource reservations. It also has information about any
|
||||
populated environment settings for the processes running inside a container.
|
||||
|
||||
Along with the configuration of how a container is created the standard also
|
||||
discusses actions that can be performed on a container to manage and inspect
|
||||
information about the processes running inside.
|
||||
|
||||
The v1 profile is meant to be able to accommodate the majority of applications
|
||||
with a strong security configuration.
|
||||
|
||||
### System Requirements and Compatibility
|
||||
|
||||
Minimum requirements:
|
||||
* Kernel version - 3.10 recommended 2.6.2x minimum(with backported patches)
|
||||
* Mounted cgroups with each subsystem in its own hierarchy
|
||||
|
||||
|
||||
### Namespaces
|
||||
|
||||
| Flag | Enabled |
|
||||
| ------------ | ------- |
|
||||
| CLONE_NEWPID | 1 |
|
||||
| CLONE_NEWUTS | 1 |
|
||||
| CLONE_NEWIPC | 1 |
|
||||
| CLONE_NEWNET | 1 |
|
||||
| CLONE_NEWNS | 1 |
|
||||
| CLONE_NEWUSER | 1 |
|
||||
|
||||
Namespaces are created for the container via the `clone` syscall.
|
||||
|
||||
|
||||
### Filesystem
|
||||
|
||||
A root filesystem must be provided to a container for execution. The container
|
||||
will use this root filesystem (rootfs) to jail and spawn processes inside where
|
||||
the binaries and system libraries are local to that directory. Any binaries
|
||||
to be executed must be contained within this rootfs.
|
||||
|
||||
Mounts that happen inside the container are automatically cleaned up when the
|
||||
container exits as the mount namespace is destroyed and the kernel will
|
||||
unmount all the mounts that were setup within that namespace.
|
||||
|
||||
For a container to execute properly there are certain filesystems that
|
||||
are required to be mounted within the rootfs that the runtime will setup.
|
||||
|
||||
| Path | Type | Flags | Data |
|
||||
| ----------- | ------ | -------------------------------------- | ---------------------------------------- |
|
||||
| /proc | proc | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
|
||||
| /dev | tmpfs | MS_NOEXEC,MS_STRICTATIME | mode=755 |
|
||||
| /dev/shm | tmpfs | MS_NOEXEC,MS_NOSUID,MS_NODEV | mode=1777,size=65536k |
|
||||
| /dev/mqueue | mqueue | MS_NOEXEC,MS_NOSUID,MS_NODEV | |
|
||||
| /dev/pts | devpts | MS_NOEXEC,MS_NOSUID | newinstance,ptmxmode=0666,mode=620,gid=5 |
|
||||
| /sys | sysfs | MS_NOEXEC,MS_NOSUID,MS_NODEV,MS_RDONLY | |
|
||||
|
||||
|
||||
After a container's filesystems are mounted within the newly created
|
||||
mount namespace `/dev` will need to be populated with a set of device nodes.
|
||||
It is expected that a rootfs does not need to have any device nodes specified
|
||||
for `/dev` within the rootfs as the container will setup the correct devices
|
||||
that are required for executing a container's process.
|
||||
|
||||
| Path | Mode | Access |
|
||||
| ------------ | ---- | ---------- |
|
||||
| /dev/null | 0666 | rwm |
|
||||
| /dev/zero | 0666 | rwm |
|
||||
| /dev/full | 0666 | rwm |
|
||||
| /dev/tty | 0666 | rwm |
|
||||
| /dev/random | 0666 | rwm |
|
||||
| /dev/urandom | 0666 | rwm |
|
||||
|
||||
|
||||
**ptmx**
|
||||
`/dev/ptmx` will need to be a symlink to the host's `/dev/ptmx` within
|
||||
the container.
|
||||
|
||||
The use of a pseudo TTY is optional within a container and it should support both.
|
||||
If a pseudo is provided to the container `/dev/console` will need to be
|
||||
setup by binding the console in `/dev/` after it has been populated and mounted
|
||||
in tmpfs.
|
||||
|
||||
| Source | Destination | UID GID | Mode | Type |
|
||||
| --------------- | ------------ | ------- | ---- | ---- |
|
||||
| *pty host path* | /dev/console | 0 0 | 0600 | bind |
|
||||
|
||||
|
||||
After `/dev/null` has been setup we check for any external links between
|
||||
the container's io, STDIN, STDOUT, STDERR. If the container's io is pointing
|
||||
to `/dev/null` outside the container we close and `dup2` the `/dev/null`
|
||||
that is local to the container's rootfs.
|
||||
|
||||
|
||||
After the container has `/proc` mounted a few standard symlinks are setup
|
||||
within `/dev/` for the io.
|
||||
|
||||
| Source | Destination |
|
||||
| --------------- | ----------- |
|
||||
| /proc/self/fd | /dev/fd |
|
||||
| /proc/self/fd/0 | /dev/stdin |
|
||||
| /proc/self/fd/1 | /dev/stdout |
|
||||
| /proc/self/fd/2 | /dev/stderr |
|
||||
|
||||
A `pivot_root` is used to change the root for the process, effectively
|
||||
jailing the process inside the rootfs.
|
||||
|
||||
```c
|
||||
put_old = mkdir(...);
|
||||
pivot_root(rootfs, put_old);
|
||||
chdir("/");
|
||||
unmount(put_old, MS_DETACH);
|
||||
rmdir(put_old);
|
||||
```
|
||||
|
||||
For container's running with a rootfs inside `ramfs` a `MS_MOVE` combined
|
||||
with a `chroot` is required as `pivot_root` is not supported in `ramfs`.
|
||||
|
||||
```c
|
||||
mount(rootfs, "/", NULL, MS_MOVE, NULL);
|
||||
chroot(".");
|
||||
chdir("/");
|
||||
```
|
||||
|
||||
The `umask` is set back to `0022` after the filesystem setup has been completed.
|
||||
|
||||
### Resources
|
||||
|
||||
Cgroups are used to handle resource allocation for containers. This includes
|
||||
system resources like cpu, memory, and device access.
|
||||
|
||||
| Subsystem | Enabled |
|
||||
| ---------- | ------- |
|
||||
| devices | 1 |
|
||||
| memory | 1 |
|
||||
| cpu | 1 |
|
||||
| cpuacct | 1 |
|
||||
| cpuset | 1 |
|
||||
| blkio | 1 |
|
||||
| perf_event | 1 |
|
||||
| freezer | 1 |
|
||||
| hugetlb | 1 |
|
||||
| pids | 1 |
|
||||
|
||||
|
||||
All cgroup subsystem are joined so that statistics can be collected from
|
||||
each of the subsystems. Freezer does not expose any stats but is joined
|
||||
so that containers can be paused and resumed.
|
||||
|
||||
The parent process of the container's init must place the init pid inside
|
||||
the correct cgroups before the initialization begins. This is done so
|
||||
that no processes or threads escape the cgroups. This sync is
|
||||
done via a pipe ( specified in the runtime section below ) that the container's
|
||||
init process will block waiting for the parent to finish setup.
|
||||
|
||||
### Security
|
||||
|
||||
The standard set of Linux capabilities that are set in a container
|
||||
provide a good default for security and flexibility for the applications.
|
||||
|
||||
|
||||
| Capability | Enabled |
|
||||
| -------------------- | ------- |
|
||||
| CAP_NET_RAW | 1 |
|
||||
| CAP_NET_BIND_SERVICE | 1 |
|
||||
| CAP_AUDIT_READ | 1 |
|
||||
| CAP_AUDIT_WRITE | 1 |
|
||||
| CAP_DAC_OVERRIDE | 1 |
|
||||
| CAP_SETFCAP | 1 |
|
||||
| CAP_SETPCAP | 1 |
|
||||
| CAP_SETGID | 1 |
|
||||
| CAP_SETUID | 1 |
|
||||
| CAP_MKNOD | 1 |
|
||||
| CAP_CHOWN | 1 |
|
||||
| CAP_FOWNER | 1 |
|
||||
| CAP_FSETID | 1 |
|
||||
| CAP_KILL | 1 |
|
||||
| CAP_SYS_CHROOT | 1 |
|
||||
| CAP_NET_BROADCAST | 0 |
|
||||
| CAP_SYS_MODULE | 0 |
|
||||
| CAP_SYS_RAWIO | 0 |
|
||||
| CAP_SYS_PACCT | 0 |
|
||||
| CAP_SYS_ADMIN | 0 |
|
||||
| CAP_SYS_NICE | 0 |
|
||||
| CAP_SYS_RESOURCE | 0 |
|
||||
| CAP_SYS_TIME | 0 |
|
||||
| CAP_SYS_TTY_CONFIG | 0 |
|
||||
| CAP_AUDIT_CONTROL | 0 |
|
||||
| CAP_MAC_OVERRIDE | 0 |
|
||||
| CAP_MAC_ADMIN | 0 |
|
||||
| CAP_NET_ADMIN | 0 |
|
||||
| CAP_SYSLOG | 0 |
|
||||
| CAP_DAC_READ_SEARCH | 0 |
|
||||
| CAP_LINUX_IMMUTABLE | 0 |
|
||||
| CAP_IPC_LOCK | 0 |
|
||||
| CAP_IPC_OWNER | 0 |
|
||||
| CAP_SYS_PTRACE | 0 |
|
||||
| CAP_SYS_BOOT | 0 |
|
||||
| CAP_LEASE | 0 |
|
||||
| CAP_WAKE_ALARM | 0 |
|
||||
| CAP_BLOCK_SUSPEND | 0 |
|
||||
|
||||
|
||||
Additional security layers like [apparmor](https://wiki.ubuntu.com/AppArmor)
|
||||
and [selinux](http://selinuxproject.org/page/Main_Page) can be used with
|
||||
the containers. A container should support setting an apparmor profile or
|
||||
selinux process and mount labels if provided in the configuration.
|
||||
|
||||
Standard apparmor profile:
|
||||
```c
|
||||
#include <tunables/global>
|
||||
profile <profile_name> flags=(attach_disconnected,mediate_deleted) {
|
||||
#include <abstractions/base>
|
||||
network,
|
||||
capability,
|
||||
file,
|
||||
umount,
|
||||
|
||||
deny @{PROC}/sys/fs/** wklx,
|
||||
deny @{PROC}/sysrq-trigger rwklx,
|
||||
deny @{PROC}/mem rwklx,
|
||||
deny @{PROC}/kmem rwklx,
|
||||
deny @{PROC}/sys/kernel/[^s][^h][^m]* wklx,
|
||||
deny @{PROC}/sys/kernel/*/** wklx,
|
||||
|
||||
deny mount,
|
||||
|
||||
deny /sys/[^f]*/** wklx,
|
||||
deny /sys/f[^s]*/** wklx,
|
||||
deny /sys/fs/[^c]*/** wklx,
|
||||
deny /sys/fs/c[^g]*/** wklx,
|
||||
deny /sys/fs/cg[^r]*/** wklx,
|
||||
deny /sys/firmware/efi/efivars/** rwklx,
|
||||
deny /sys/kernel/security/** rwklx,
|
||||
}
|
||||
```
|
||||
|
||||
*TODO: seccomp work is being done to find a good default config*
|
||||
|
||||
### Runtime and Init Process
|
||||
|
||||
During container creation the parent process needs to talk to the container's init
|
||||
process and have a form of synchronization. This is accomplished by creating
|
||||
a pipe that is passed to the container's init. When the init process first spawns
|
||||
it will block on its side of the pipe until the parent closes its side. This
|
||||
allows the parent to have time to set the new process inside a cgroup hierarchy
|
||||
and/or write any uid/gid mappings required for user namespaces.
|
||||
The pipe is passed to the init process via FD 3.
|
||||
|
||||
The application consuming libcontainer should be compiled statically. libcontainer
|
||||
does not define any init process and the arguments provided are used to `exec` the
|
||||
process inside the application. There should be no long running init within the
|
||||
container spec.
|
||||
|
||||
If a pseudo tty is provided to a container it will open and `dup2` the console
|
||||
as the container's STDIN, STDOUT, STDERR as well as mounting the console
|
||||
as `/dev/console`.
|
||||
|
||||
An extra set of mounts are provided to a container and setup for use. A container's
|
||||
rootfs can contain some non portable files inside that can cause side effects during
|
||||
execution of a process. These files are usually created and populated with the container
|
||||
specific information via the runtime.
|
||||
|
||||
**Extra runtime files:**
|
||||
* /etc/hosts
|
||||
* /etc/resolv.conf
|
||||
* /etc/hostname
|
||||
* /etc/localtime
|
||||
|
||||
|
||||
#### Defaults
|
||||
|
||||
There are a few defaults that can be overridden by users, but in their omission
|
||||
these apply to processes within a container.
|
||||
|
||||
| Type | Value |
|
||||
| ------------------- | ------------------------------ |
|
||||
| Parent Death Signal | SIGKILL |
|
||||
| UID | 0 |
|
||||
| GID | 0 |
|
||||
| GROUPS | 0, NULL |
|
||||
| CWD | "/" |
|
||||
| $HOME | Current user's home dir or "/" |
|
||||
| Readonly rootfs | false |
|
||||
| Pseudo TTY | false |
|
||||
|
||||
|
||||
## Actions
|
||||
|
||||
After a container is created there is a standard set of actions that can
|
||||
be done to the container. These actions are part of the public API for
|
||||
a container.
|
||||
|
||||
| Action | Description |
|
||||
| -------------- | ------------------------------------------------------------------ |
|
||||
| Get processes | Return all the pids for processes running inside a container |
|
||||
| Get Stats | Return resource statistics for the container as a whole |
|
||||
| Wait | Waits on the container's init process ( pid 1 ) |
|
||||
| Wait Process | Wait on any of the container's processes returning the exit status |
|
||||
| Destroy | Kill the container's init process and remove any filesystem state |
|
||||
| Signal | Send a signal to the container's init process |
|
||||
| Signal Process | Send a signal to any of the container's processes |
|
||||
| Pause | Pause all processes inside the container |
|
||||
| Resume | Resume all processes inside the container if paused |
|
||||
| Exec | Execute a new process inside of the container ( requires setns ) |
|
||||
| Set | Setup configs of the container after it's created |
|
||||
|
||||
### Execute a new process inside of a running container.
|
||||
|
||||
User can execute a new process inside of a running container. Any binaries to be
|
||||
executed must be accessible within the container's rootfs.
|
||||
|
||||
The started process will run inside the container's rootfs. Any changes
|
||||
made by the process to the container's filesystem will persist after the
|
||||
process finished executing.
|
||||
|
||||
The started process will join all the container's existing namespaces. When the
|
||||
container is paused, the process will also be paused and will resume when
|
||||
the container is unpaused. The started process will only run when the container's
|
||||
primary process (PID 1) is running, and will not be restarted when the container
|
||||
is restarted.
|
||||
|
||||
#### Planned additions

The started process will have its own cgroups nested inside the container's
cgroups. These will be used for process tracking and, optionally, resource
allocation handling for the new process. The freezer cgroup is required; the
rest of the cgroups are optional. The process executor must place its pid
inside the correct cgroups before starting the process so that no child
processes or threads can escape the cgroups.

When the process is stopped, the process executor will make a best-effort
attempt to stop all of its children and remove the sub-cgroups.

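The pid-placement step can be illustrated with a small sketch, assuming a cgroup v1 freezer hierarchy and a hypothetical path; in practice the path would come from the container's cgroup manager.

```go
package main

import (
    "fmt"
    "log"
    "os"
    "path/filepath"
)

func main() {
    // Hypothetical nested cgroup for the exec'd process.
    cgroupDir := "/sys/fs/cgroup/freezer/mycontainer/exec-1234"
    if err := os.MkdirAll(cgroupDir, 0o755); err != nil {
        log.Fatal(err)
    }

    // Writing the executor's own pid into cgroup.procs before fork/exec means
    // every child process and thread it creates is tracked by this cgroup and
    // cannot escape it.
    pid := []byte(fmt.Sprintf("%d\n", os.Getpid()))
    if err := os.WriteFile(filepath.Join(cgroupDir, "cgroup.procs"), pid, 0o644); err != nil {
        log.Fatal(err)
    }
}
```
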
114
vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go
generated
vendored
114
vendor/github.com/opencontainers/runc/libcontainer/capabilities_linux.go
generated
vendored
@@ -1,114 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/syndtr/gocapability/capability"
|
||||
)
|
||||
|
||||
const allCapabilityTypes = capability.CAPS | capability.BOUNDS | capability.AMBS
|
||||
|
||||
var capabilityMap map[string]capability.Cap
|
||||
|
||||
func init() {
|
||||
capabilityMap = make(map[string]capability.Cap)
|
||||
last := capability.CAP_LAST_CAP
|
||||
// workaround for RHEL6 which has no /proc/sys/kernel/cap_last_cap
|
||||
if last == capability.Cap(63) {
|
||||
last = capability.CAP_BLOCK_SUSPEND
|
||||
}
|
||||
for _, cap := range capability.List() {
|
||||
if cap > last {
|
||||
continue
|
||||
}
|
||||
capKey := fmt.Sprintf("CAP_%s", strings.ToUpper(cap.String()))
|
||||
capabilityMap[capKey] = cap
|
||||
}
|
||||
}
|
||||
|
||||
func newContainerCapList(capConfig *configs.Capabilities) (*containerCapabilities, error) {
|
||||
bounding := []capability.Cap{}
|
||||
for _, c := range capConfig.Bounding {
|
||||
v, ok := capabilityMap[c]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown capability %q", c)
|
||||
}
|
||||
bounding = append(bounding, v)
|
||||
}
|
||||
effective := []capability.Cap{}
|
||||
for _, c := range capConfig.Effective {
|
||||
v, ok := capabilityMap[c]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown capability %q", c)
|
||||
}
|
||||
effective = append(effective, v)
|
||||
}
|
||||
inheritable := []capability.Cap{}
|
||||
for _, c := range capConfig.Inheritable {
|
||||
v, ok := capabilityMap[c]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown capability %q", c)
|
||||
}
|
||||
inheritable = append(inheritable, v)
|
||||
}
|
||||
permitted := []capability.Cap{}
|
||||
for _, c := range capConfig.Permitted {
|
||||
v, ok := capabilityMap[c]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown capability %q", c)
|
||||
}
|
||||
permitted = append(permitted, v)
|
||||
}
|
||||
ambient := []capability.Cap{}
|
||||
for _, c := range capConfig.Ambient {
|
||||
v, ok := capabilityMap[c]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("unknown capability %q", c)
|
||||
}
|
||||
ambient = append(ambient, v)
|
||||
}
|
||||
pid, err := capability.NewPid(os.Getpid())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &containerCapabilities{
|
||||
bounding: bounding,
|
||||
effective: effective,
|
||||
inheritable: inheritable,
|
||||
permitted: permitted,
|
||||
ambient: ambient,
|
||||
pid: pid,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type containerCapabilities struct {
|
||||
pid capability.Capabilities
|
||||
bounding []capability.Cap
|
||||
effective []capability.Cap
|
||||
inheritable []capability.Cap
|
||||
permitted []capability.Cap
|
||||
ambient []capability.Cap
|
||||
}
|
||||
|
||||
// ApplyBoundingSet sets the capability bounding set to those specified in the whitelist.
|
||||
func (c *containerCapabilities) ApplyBoundingSet() error {
|
||||
c.pid.Clear(capability.BOUNDS)
|
||||
c.pid.Set(capability.BOUNDS, c.bounding...)
|
||||
return c.pid.Apply(capability.BOUNDS)
|
||||
}
|
||||
|
||||
// Apply sets all the capabilities for the current process in the config.
|
||||
func (c *containerCapabilities) ApplyCaps() error {
|
||||
c.pid.Clear(allCapabilityTypes)
|
||||
c.pid.Set(capability.BOUNDS, c.bounding...)
|
||||
c.pid.Set(capability.PERMITTED, c.permitted...)
|
||||
c.pid.Set(capability.INHERITABLE, c.inheritable...)
|
||||
c.pid.Set(capability.EFFECTIVE, c.effective...)
|
||||
c.pid.Set(capability.AMBIENT, c.ambient...)
|
||||
return c.pid.Apply(allCapabilityTypes)
|
||||
}
|
||||
10
vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go
generated
vendored
10
vendor/github.com/opencontainers/runc/libcontainer/compat_1.5_linux.go
generated
vendored
@@ -1,10 +0,0 @@
// +build linux,!go1.5

package libcontainer

import "syscall"

// GidMappingsEnableSetgroups was added in Go 1.5, so do nothing when building
// with earlier versions.
func enableSetgroups(sys *syscall.SysProcAttr) {
}
17
vendor/github.com/opencontainers/runc/libcontainer/console.go
generated
vendored
17
vendor/github.com/opencontainers/runc/libcontainer/console.go
generated
vendored
@@ -1,17 +0,0 @@
package libcontainer

import (
    "io"
    "os"
)

// Console represents a pseudo TTY.
type Console interface {
    io.ReadWriteCloser

    // Path returns the filesystem path to the slave side of the pty.
    Path() string

    // File returns the master file of the pty.
    File() *os.File
}
13
vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go
generated
vendored
13
vendor/github.com/opencontainers/runc/libcontainer/console_freebsd.go
generated
vendored
@@ -1,13 +0,0 @@
// +build freebsd

package libcontainer

import (
    "errors"
)

// newConsole returns an initialized console that can be used within a container by copying bytes
// from the master side to the slave that is attached as the tty for the container's init process.
func newConsole() (Console, error) {
    return nil, errors.New("libcontainer console is not supported on FreeBSD")
}
157
vendor/github.com/opencontainers/runc/libcontainer/console_linux.go
generated
vendored
157
vendor/github.com/opencontainers/runc/libcontainer/console_linux.go
generated
vendored
@@ -1,157 +0,0 @@
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
func ConsoleFromFile(f *os.File) Console {
|
||||
return &linuxConsole{
|
||||
master: f,
|
||||
}
|
||||
}
|
||||
|
||||
// newConsole returns an initialized console that can be used within a container by copying bytes
|
||||
// from the master side to the slave that is attached as the tty for the container's init process.
|
||||
func newConsole() (Console, error) {
|
||||
master, err := os.OpenFile("/dev/ptmx", unix.O_RDWR|unix.O_NOCTTY|unix.O_CLOEXEC, 0)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := saneTerminal(master); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
console, err := ptsname(master)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := unlockpt(master); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &linuxConsole{
|
||||
slavePath: console,
|
||||
master: master,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// linuxConsole is a linux pseudo TTY for use within a container.
|
||||
type linuxConsole struct {
|
||||
master *os.File
|
||||
slavePath string
|
||||
}
|
||||
|
||||
func (c *linuxConsole) File() *os.File {
|
||||
return c.master
|
||||
}
|
||||
|
||||
func (c *linuxConsole) Path() string {
|
||||
return c.slavePath
|
||||
}
|
||||
|
||||
func (c *linuxConsole) Read(b []byte) (int, error) {
|
||||
return c.master.Read(b)
|
||||
}
|
||||
|
||||
func (c *linuxConsole) Write(b []byte) (int, error) {
|
||||
return c.master.Write(b)
|
||||
}
|
||||
|
||||
func (c *linuxConsole) Close() error {
|
||||
if m := c.master; m != nil {
|
||||
return m.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// mount initializes the console inside the rootfs mounting with the specified mount label
|
||||
// and applying the correct ownership of the console.
|
||||
func (c *linuxConsole) mount() error {
|
||||
oldMask := unix.Umask(0000)
|
||||
defer unix.Umask(oldMask)
|
||||
f, err := os.Create("/dev/console")
|
||||
if err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
if f != nil {
|
||||
f.Close()
|
||||
}
|
||||
return unix.Mount(c.slavePath, "/dev/console", "bind", unix.MS_BIND, "")
|
||||
}
|
||||
|
||||
// dupStdio opens the slavePath for the console and dups the fds to the current
|
||||
// process's stdio, fds 0, 1, and 2.
|
||||
func (c *linuxConsole) dupStdio() error {
|
||||
slave, err := c.open(unix.O_RDWR)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fd := int(slave.Fd())
|
||||
for _, i := range []int{0, 1, 2} {
|
||||
if err := unix.Dup3(fd, i, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// open is a clone of os.OpenFile without the O_CLOEXEC used to open the pty slave.
|
||||
func (c *linuxConsole) open(flag int) (*os.File, error) {
|
||||
r, e := unix.Open(c.slavePath, flag, 0)
|
||||
if e != nil {
|
||||
return nil, &os.PathError{
|
||||
Op: "open",
|
||||
Path: c.slavePath,
|
||||
Err: e,
|
||||
}
|
||||
}
|
||||
return os.NewFile(uintptr(r), c.slavePath), nil
|
||||
}
|
||||
|
||||
func ioctl(fd uintptr, flag, data uintptr) error {
|
||||
if _, _, err := unix.Syscall(unix.SYS_IOCTL, fd, flag, data); err != 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// unlockpt unlocks the slave pseudoterminal device corresponding to the master pseudoterminal referred to by f.
|
||||
// unlockpt should be called before opening the slave side of a pty.
|
||||
func unlockpt(f *os.File) error {
|
||||
var u int32
|
||||
return ioctl(f.Fd(), unix.TIOCSPTLCK, uintptr(unsafe.Pointer(&u)))
|
||||
}
|
||||
|
||||
// ptsname retrieves the name of the first available pts for the given master.
|
||||
func ptsname(f *os.File) (string, error) {
|
||||
var n int32
|
||||
if err := ioctl(f.Fd(), unix.TIOCGPTN, uintptr(unsafe.Pointer(&n))); err != nil {
|
||||
return "", err
|
||||
}
|
||||
return fmt.Sprintf("/dev/pts/%d", n), nil
|
||||
}
|
||||
|
||||
// saneTerminal sets the necessary tty_ioctl(4)s to ensure that a pty pair
|
||||
// created by us acts normally. In particular, a not-very-well-known default of
|
||||
// Linux unix98 ptys is that they have +onlcr by default. While this isn't a
|
||||
// problem for terminal emulators, because we relay data from the terminal we
|
||||
// also relay that funky line discipline.
|
||||
func saneTerminal(terminal *os.File) error {
|
||||
// Go doesn't have a wrapper for any of the termios ioctls.
|
||||
var termios unix.Termios
|
||||
|
||||
if err := ioctl(terminal.Fd(), unix.TCGETS, uintptr(unsafe.Pointer(&termios))); err != nil {
|
||||
return fmt.Errorf("ioctl(tty, tcgets): %s", err.Error())
|
||||
}
|
||||
|
||||
// Set -onlcr so we don't have to deal with \r.
|
||||
termios.Oflag &^= unix.ONLCR
|
||||
|
||||
if err := ioctl(terminal.Fd(), unix.TCSETS, uintptr(unsafe.Pointer(&termios))); err != nil {
|
||||
return fmt.Errorf("ioctl(tty, tcsets): %s", err.Error())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
11
vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go
generated
vendored
11
vendor/github.com/opencontainers/runc/libcontainer/console_solaris.go
generated
vendored
@@ -1,11 +0,0 @@
package libcontainer

import (
    "errors"
)

// newConsole returns an initialized console that can be used within a container by copying bytes
// from the master side to the slave that is attached as the tty for the container's init process.
func newConsole() (Console, error) {
    return nil, errors.New("libcontainer console is not supported on Solaris")
}
30
vendor/github.com/opencontainers/runc/libcontainer/console_windows.go
generated
vendored
30
vendor/github.com/opencontainers/runc/libcontainer/console_windows.go
generated
vendored
@@ -1,30 +0,0 @@
package libcontainer

// newConsole returns an initialized console that can be used within a container
func newConsole() (Console, error) {
    return &windowsConsole{}, nil
}

// windowsConsole is a Windows pseudo TTY for use within a container.
type windowsConsole struct {
}

func (c *windowsConsole) Fd() uintptr {
    return 0
}

func (c *windowsConsole) Path() string {
    return ""
}

func (c *windowsConsole) Read(b []byte) (int, error) {
    return 0, nil
}

func (c *windowsConsole) Write(b []byte) (int, error) {
    return 0, nil
}

func (c *windowsConsole) Close() error {
    return nil
}
166
vendor/github.com/opencontainers/runc/libcontainer/container.go
generated
vendored
166
vendor/github.com/opencontainers/runc/libcontainer/container.go
generated
vendored
@@ -1,166 +0,0 @@
|
||||
// Package libcontainer provides a native Go implementation for creating containers
|
||||
// with namespaces, cgroups, capabilities, and filesystem access controls.
|
||||
// It allows you to manage the lifecycle of the container performing additional operations
|
||||
// after the container is created.
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
// Status is the status of a container.
|
||||
type Status int
|
||||
|
||||
const (
|
||||
// Created is the status that denotes the container exists but has not been run yet.
|
||||
Created Status = iota
|
||||
// Running is the status that denotes the container exists and is running.
|
||||
Running
|
||||
// Pausing is the status that denotes the container exists, it is in the process of being paused.
|
||||
Pausing
|
||||
// Paused is the status that denotes the container exists, but all its processes are paused.
|
||||
Paused
|
||||
// Stopped is the status that denotes the container does not have a created or running process.
|
||||
Stopped
|
||||
)
|
||||
|
||||
func (s Status) String() string {
|
||||
switch s {
|
||||
case Created:
|
||||
return "created"
|
||||
case Running:
|
||||
return "running"
|
||||
case Pausing:
|
||||
return "pausing"
|
||||
case Paused:
|
||||
return "paused"
|
||||
case Stopped:
|
||||
return "stopped"
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// BaseState represents the platform agnostic pieces relating to a
|
||||
// running container's state
|
||||
type BaseState struct {
|
||||
// ID is the container ID.
|
||||
ID string `json:"id"`
|
||||
|
||||
// InitProcessPid is the init process id in the parent namespace.
|
||||
InitProcessPid int `json:"init_process_pid"`
|
||||
|
||||
// InitProcessStartTime is the init process start time in clock cycles since boot time.
|
||||
InitProcessStartTime string `json:"init_process_start"`
|
||||
|
||||
// Created is the unix timestamp for the creation time of the container in UTC
|
||||
Created time.Time `json:"created"`
|
||||
|
||||
// Config is the container's configuration.
|
||||
Config configs.Config `json:"config"`
|
||||
}
|
||||
|
||||
// BaseContainer is a libcontainer container object.
|
||||
//
|
||||
// Each container is thread-safe within the same process. Since a container can
|
||||
// be destroyed by a separate process, any function may return that the container
|
||||
// was not found. BaseContainer includes methods that are platform agnostic.
|
||||
type BaseContainer interface {
|
||||
// Returns the ID of the container
|
||||
ID() string
|
||||
|
||||
// Returns the current status of the container.
|
||||
//
|
||||
// errors:
|
||||
// ContainerNotExists - Container no longer exists,
|
||||
// Systemerror - System error.
|
||||
Status() (Status, error)
|
||||
|
||||
// State returns the current container's state information.
|
||||
//
|
||||
// errors:
|
||||
// SystemError - System error.
|
||||
State() (*State, error)
|
||||
|
||||
// Returns the current config of the container.
|
||||
Config() configs.Config
|
||||
|
||||
// Returns the PIDs inside this container. The PIDs are in the namespace of the calling process.
|
||||
//
|
||||
// errors:
|
||||
// ContainerNotExists - Container no longer exists,
|
||||
// Systemerror - System error.
|
||||
//
|
||||
// Some of the returned PIDs may no longer refer to processes in the Container, unless
|
||||
// the Container state is PAUSED in which case every PID in the slice is valid.
|
||||
Processes() ([]int, error)
|
||||
|
||||
// Returns statistics for the container.
|
||||
//
|
||||
// errors:
|
||||
// ContainerNotExists - Container no longer exists,
|
||||
// Systemerror - System error.
|
||||
Stats() (*Stats, error)
|
||||
|
||||
// Set resources of container as configured
|
||||
//
|
||||
// We can use this to change resources when containers are running.
|
||||
//
|
||||
// errors:
|
||||
// SystemError - System error.
|
||||
Set(config configs.Config) error
|
||||
|
||||
// Start a process inside the container. Returns error if process fails to
|
||||
// start. You can track process lifecycle with passed Process structure.
|
||||
//
|
||||
// errors:
|
||||
// ContainerNotExists - Container no longer exists,
|
||||
// ConfigInvalid - config is invalid,
|
||||
// ContainerPaused - Container is paused,
|
||||
// SystemError - System error.
|
||||
Start(process *Process) (err error)
|
||||
|
||||
// Run immediately starts the process inside the container. Returns error if process
|
||||
// fails to start. It does not block waiting for the exec fifo; the fifo is
// opened after start returns.
|
||||
//
|
||||
// errors:
|
||||
// ContainerNotExists - Container no longer exists,
|
||||
// ConfigInvalid - config is invalid,
|
||||
// ContainerPaused - Container is paused,
|
||||
// SystemError - System error.
|
||||
Run(process *Process) (err error)
|
||||
|
||||
// Destroys the container, if it's in a valid state, after killing any
|
||||
// remaining running processes.
|
||||
//
|
||||
// Any event registrations are removed before the container is destroyed.
|
||||
// No error is returned if the container is already destroyed.
|
||||
//
|
||||
// Running containers must first be stopped using Signal(..).
|
||||
// Paused containers must first be resumed using Resume(..).
|
||||
//
|
||||
// errors:
|
||||
// ContainerNotStopped - Container is still running,
|
||||
// ContainerPaused - Container is paused,
|
||||
// SystemError - System error.
|
||||
Destroy() error
|
||||
|
||||
// Signal sends the provided signal code to the container's initial process.
|
||||
//
|
||||
// If all is specified the signal is sent to all processes in the container
|
||||
// including the initial process.
|
||||
//
|
||||
// errors:
|
||||
// SystemError - System error.
|
||||
Signal(s os.Signal, all bool) error
|
||||
|
||||
// Exec signals the container to exec the users process at the end of the init.
|
||||
//
|
||||
// errors:
|
||||
// SystemError - System error.
|
||||
Exec() error
|
||||
}
|
||||
1597
vendor/github.com/opencontainers/runc/libcontainer/container_linux.go
generated
vendored
1597
vendor/github.com/opencontainers/runc/libcontainer/container_linux.go
generated
vendored
File diff suppressed because it is too large
Load Diff
20
vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go
generated
vendored
20
vendor/github.com/opencontainers/runc/libcontainer/container_solaris.go
generated
vendored
@@ -1,20 +0,0 @@
|
||||
package libcontainer
|
||||
|
||||
// State represents a running container's state
|
||||
type State struct {
|
||||
BaseState
|
||||
|
||||
// Platform specific fields below here
|
||||
}
|
||||
|
||||
// A libcontainer container object.
|
||||
//
|
||||
// Each container is thread-safe within the same process. Since a container can
|
||||
// be destroyed by a separate process, any function may return that the container
|
||||
// was not found.
|
||||
type Container interface {
|
||||
BaseContainer
|
||||
|
||||
// Methods below here are platform specific
|
||||
|
||||
}
|
||||
20
vendor/github.com/opencontainers/runc/libcontainer/container_windows.go
generated
vendored
20
vendor/github.com/opencontainers/runc/libcontainer/container_windows.go
generated
vendored
@@ -1,20 +0,0 @@
|
||||
package libcontainer
|
||||
|
||||
// State represents a running container's state
|
||||
type State struct {
|
||||
BaseState
|
||||
|
||||
// Platform specific fields below here
|
||||
}
|
||||
|
||||
// A libcontainer container object.
|
||||
//
|
||||
// Each container is thread-safe within the same process. Since a container can
|
||||
// be destroyed by a separate process, any function may return that the container
|
||||
// was not found.
|
||||
type Container interface {
|
||||
BaseContainer
|
||||
|
||||
// Methods below here are platform specific
|
||||
|
||||
}
|
||||
37
vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go
generated
vendored
37
vendor/github.com/opencontainers/runc/libcontainer/criu_opts_linux.go
generated
vendored
@@ -1,37 +0,0 @@
|
||||
package libcontainer
|
||||
|
||||
// cgroup restoring strategy provided by criu
|
||||
type cgMode uint32
|
||||
|
||||
const (
|
||||
CRIU_CG_MODE_SOFT cgMode = 3 + iota // restore cgroup properties only if the directory was created by criu
|
||||
CRIU_CG_MODE_FULL // always restore all cgroups and their properties
|
||||
CRIU_CG_MODE_STRICT // restore all, requiring them to not be present in the system
|
||||
CRIU_CG_MODE_DEFAULT // the same as CRIU_CG_MODE_SOFT
|
||||
)
|
||||
|
||||
type CriuPageServerInfo struct {
|
||||
Address string // IP address of CRIU page server
|
||||
Port int32 // port number of CRIU page server
|
||||
}
|
||||
|
||||
type VethPairName struct {
|
||||
ContainerInterfaceName string
|
||||
HostInterfaceName string
|
||||
}
|
||||
|
||||
type CriuOpts struct {
|
||||
ImagesDirectory string // directory for storing image files
|
||||
WorkDirectory string // directory to cd and write logs/pidfiles/stats to
|
||||
ParentImage string // directory for storing parent image files in pre-dump and dump
|
||||
LeaveRunning bool // leave container in running state after checkpoint
|
||||
TcpEstablished bool // checkpoint/restore established TCP connections
|
||||
ExternalUnixConnections bool // allow external unix connections
|
||||
ShellJob bool // allow to dump and restore shell jobs
|
||||
FileLocks bool // handle file locks, for safety
|
||||
PreDump bool // call criu predump to perform iterative checkpoint
|
||||
PageServer CriuPageServerInfo // allow to dump to criu page server
|
||||
VethPairs []VethPairName // pass the veth to criu when restore
|
||||
ManageCgroupsMode cgMode // dump or restore cgroup mode
|
||||
EmptyNs uint32 // don't c/r properties for namespace from this mask
|
||||
}
|
||||
6
vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go
generated
vendored
6
vendor/github.com/opencontainers/runc/libcontainer/criu_opts_windows.go
generated
vendored
@@ -1,6 +0,0 @@
package libcontainer

// TODO Windows: This can ultimately be entirely factored out as criu is
// a Unix concept not relevant on Windows.
type CriuOpts struct {
}
70
vendor/github.com/opencontainers/runc/libcontainer/error.go
generated
vendored
70
vendor/github.com/opencontainers/runc/libcontainer/error.go
generated
vendored
@@ -1,70 +0,0 @@
|
||||
package libcontainer
|
||||
|
||||
import "io"
|
||||
|
||||
// ErrorCode is the API error code type.
|
||||
type ErrorCode int
|
||||
|
||||
// API error codes.
|
||||
const (
|
||||
// Factory errors
|
||||
IdInUse ErrorCode = iota
|
||||
InvalidIdFormat
|
||||
|
||||
// Container errors
|
||||
ContainerNotExists
|
||||
ContainerPaused
|
||||
ContainerNotStopped
|
||||
ContainerNotRunning
|
||||
ContainerNotPaused
|
||||
|
||||
// Process errors
|
||||
NoProcessOps
|
||||
|
||||
// Common errors
|
||||
ConfigInvalid
|
||||
ConsoleExists
|
||||
SystemError
|
||||
)
|
||||
|
||||
func (c ErrorCode) String() string {
|
||||
switch c {
|
||||
case IdInUse:
|
||||
return "Id already in use"
|
||||
case InvalidIdFormat:
|
||||
return "Invalid format"
|
||||
case ContainerPaused:
|
||||
return "Container paused"
|
||||
case ConfigInvalid:
|
||||
return "Invalid configuration"
|
||||
case SystemError:
|
||||
return "System error"
|
||||
case ContainerNotExists:
|
||||
return "Container does not exist"
|
||||
case ContainerNotStopped:
|
||||
return "Container is not stopped"
|
||||
case ContainerNotRunning:
|
||||
return "Container is not running"
|
||||
case ConsoleExists:
|
||||
return "Console exists for process"
|
||||
case ContainerNotPaused:
|
||||
return "Container is not paused"
|
||||
case NoProcessOps:
|
||||
return "No process operations"
|
||||
default:
|
||||
return "Unknown error"
|
||||
}
|
||||
}
|
||||
|
||||
// Error is the API error type.
|
||||
type Error interface {
|
||||
error
|
||||
|
||||
// Returns an error if it failed to write the detail of the Error to w.
|
||||
// The detail of the Error may include the error message and a
|
||||
// representation of the stack trace.
|
||||
Detail(w io.Writer) error
|
||||
|
||||
// Returns the error code for this error.
|
||||
Code() ErrorCode
|
||||
}
|
||||
44
vendor/github.com/opencontainers/runc/libcontainer/factory.go
generated
vendored
44
vendor/github.com/opencontainers/runc/libcontainer/factory.go
generated
vendored
@@ -1,44 +0,0 @@
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
type Factory interface {
|
||||
// Creates a new container with the given id and starts the initial process inside it.
|
||||
// id must be a string containing only letters, digits and underscores and must contain
|
||||
// between 1 and 1024 characters, inclusive.
|
||||
//
|
||||
// The id must not already be in use by an existing container. Containers created using
|
||||
// a factory with the same path (and filesystem) must have distinct ids.
|
||||
//
|
||||
// Returns the new container with a running process.
|
||||
//
|
||||
// errors:
|
||||
// IdInUse - id is already in use by a container
|
||||
// InvalidIdFormat - id has incorrect format
|
||||
// ConfigInvalid - config is invalid
|
||||
// Systemerror - System error
|
||||
//
|
||||
// On error, any partially created container parts are cleaned up (the operation is atomic).
|
||||
Create(id string, config *configs.Config) (Container, error)
|
||||
|
||||
// Load takes an ID for an existing container and returns the container information
|
||||
// from the state. This presents a read only view of the container.
|
||||
//
|
||||
// errors:
|
||||
// Path does not exist
|
||||
// System error
|
||||
Load(id string) (Container, error)
|
||||
|
||||
// StartInitialization is an internal API to libcontainer used during the reexec of the
|
||||
// container.
|
||||
//
|
||||
// Errors:
|
||||
// Pipe connection error
|
||||
// System error
|
||||
StartInitialization() error
|
||||
|
||||
// Type returns info string about factory type (e.g. lxc, libcontainer...)
|
||||
Type() string
|
||||
}
|
||||
333
vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go
generated
vendored
333
vendor/github.com/opencontainers/runc/libcontainer/factory_linux.go
generated
vendored
@@ -1,333 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"runtime/debug"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/rootless"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups/systemd"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/configs/validate"
|
||||
"github.com/opencontainers/runc/libcontainer/utils"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const (
|
||||
stateFilename = "state.json"
|
||||
execFifoFilename = "exec.fifo"
|
||||
)
|
||||
|
||||
var idRegex = regexp.MustCompile(`^[\w+-\.]+$`)
|
||||
|
||||
// InitArgs returns an options func to configure a LinuxFactory with the
|
||||
// provided init binary path and arguments.
|
||||
func InitArgs(args ...string) func(*LinuxFactory) error {
|
||||
return func(l *LinuxFactory) (err error) {
|
||||
if len(args) > 0 {
|
||||
// Resolve relative paths to ensure that it's available
|
||||
// after directory changes.
|
||||
if args[0], err = filepath.Abs(args[0]); err != nil {
|
||||
return newGenericError(err, ConfigInvalid)
|
||||
}
|
||||
}
|
||||
|
||||
l.InitArgs = args
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// SystemdCgroups is an options func to configure a LinuxFactory to return
|
||||
// containers that use systemd to create and manage cgroups.
|
||||
func SystemdCgroups(l *LinuxFactory) error {
|
||||
l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
|
||||
return &systemd.Manager{
|
||||
Cgroups: config,
|
||||
Paths: paths,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Cgroupfs is an options func to configure a LinuxFactory to return
|
||||
// containers that use the native cgroups filesystem implementation to
|
||||
// create and manage cgroups.
|
||||
func Cgroupfs(l *LinuxFactory) error {
|
||||
l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
|
||||
return &fs.Manager{
|
||||
Cgroups: config,
|
||||
Paths: paths,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RootlessCgroups is an options func to configure a LinuxFactory to
|
||||
// return containers that use the "rootless" cgroup manager, which will
|
||||
// fail to do any operations not possible to do with an unprivileged user.
|
||||
// It should only be used in conjunction with rootless containers.
|
||||
func RootlessCgroups(l *LinuxFactory) error {
|
||||
l.NewCgroupsManager = func(config *configs.Cgroup, paths map[string]string) cgroups.Manager {
|
||||
return &rootless.Manager{
|
||||
Cgroups: config,
|
||||
Paths: paths,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TmpfsRoot is an option func to mount LinuxFactory.Root to tmpfs.
|
||||
func TmpfsRoot(l *LinuxFactory) error {
|
||||
mounted, err := mount.Mounted(l.Root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !mounted {
|
||||
if err := unix.Mount("tmpfs", l.Root, "tmpfs", 0, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CriuPath returns an option func to configure a LinuxFactory with the
|
||||
// provided criupath
|
||||
func CriuPath(criupath string) func(*LinuxFactory) error {
|
||||
return func(l *LinuxFactory) error {
|
||||
l.CriuPath = criupath
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// New returns a linux based container factory based in the root directory and
|
||||
// configures the factory with the provided option funcs.
|
||||
func New(root string, options ...func(*LinuxFactory) error) (Factory, error) {
|
||||
if root != "" {
|
||||
if err := os.MkdirAll(root, 0700); err != nil {
|
||||
return nil, newGenericError(err, SystemError)
|
||||
}
|
||||
}
|
||||
l := &LinuxFactory{
|
||||
Root: root,
|
||||
InitArgs: []string{"/proc/self/exe", "init"},
|
||||
Validator: validate.New(),
|
||||
CriuPath: "criu",
|
||||
}
|
||||
Cgroupfs(l)
|
||||
for _, opt := range options {
|
||||
if err := opt(l); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// LinuxFactory implements the default factory interface for linux based systems.
|
||||
type LinuxFactory struct {
|
||||
// Root directory for the factory to store state.
|
||||
Root string
|
||||
|
||||
// InitArgs are arguments for calling the init responsibilities for spawning
|
||||
// a container.
|
||||
InitArgs []string
|
||||
|
||||
// CriuPath is the path to the criu binary used for checkpoint and restore of
|
||||
// containers.
|
||||
CriuPath string
|
||||
|
||||
// Validator provides validation to container configurations.
|
||||
Validator validate.Validator
|
||||
|
||||
// NewCgroupsManager returns an initialized cgroups manager for a single container.
|
||||
NewCgroupsManager func(config *configs.Cgroup, paths map[string]string) cgroups.Manager
|
||||
}
|
||||
|
||||
func (l *LinuxFactory) Create(id string, config *configs.Config) (Container, error) {
|
||||
if l.Root == "" {
|
||||
return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
|
||||
}
|
||||
if err := l.validateID(id); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := l.Validator.Validate(config); err != nil {
|
||||
return nil, newGenericError(err, ConfigInvalid)
|
||||
}
|
||||
uid, err := config.HostRootUID()
|
||||
if err != nil {
|
||||
return nil, newGenericError(err, SystemError)
|
||||
}
|
||||
gid, err := config.HostRootGID()
|
||||
if err != nil {
|
||||
return nil, newGenericError(err, SystemError)
|
||||
}
|
||||
containerRoot := filepath.Join(l.Root, id)
|
||||
if _, err := os.Stat(containerRoot); err == nil {
|
||||
return nil, newGenericError(fmt.Errorf("container with id exists: %v", id), IdInUse)
|
||||
} else if !os.IsNotExist(err) {
|
||||
return nil, newGenericError(err, SystemError)
|
||||
}
|
||||
if err := os.MkdirAll(containerRoot, 0711); err != nil {
|
||||
return nil, newGenericError(err, SystemError)
|
||||
}
|
||||
if err := os.Chown(containerRoot, uid, gid); err != nil {
|
||||
return nil, newGenericError(err, SystemError)
|
||||
}
|
||||
if config.Rootless {
|
||||
RootlessCgroups(l)
|
||||
}
|
||||
c := &linuxContainer{
|
||||
id: id,
|
||||
root: containerRoot,
|
||||
config: config,
|
||||
initArgs: l.InitArgs,
|
||||
criuPath: l.CriuPath,
|
||||
cgroupManager: l.NewCgroupsManager(config.Cgroups, nil),
|
||||
}
|
||||
c.state = &stoppedState{c: c}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (l *LinuxFactory) Load(id string) (Container, error) {
|
||||
if l.Root == "" {
|
||||
return nil, newGenericError(fmt.Errorf("invalid root"), ConfigInvalid)
|
||||
}
|
||||
containerRoot := filepath.Join(l.Root, id)
|
||||
state, err := l.loadState(containerRoot, id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r := &nonChildProcess{
|
||||
processPid: state.InitProcessPid,
|
||||
processStartTime: state.InitProcessStartTime,
|
||||
fds: state.ExternalDescriptors,
|
||||
}
|
||||
// We have to use the RootlessManager.
|
||||
if state.Rootless {
|
||||
RootlessCgroups(l)
|
||||
}
|
||||
c := &linuxContainer{
|
||||
initProcess: r,
|
||||
initProcessStartTime: state.InitProcessStartTime,
|
||||
id: id,
|
||||
config: &state.Config,
|
||||
initArgs: l.InitArgs,
|
||||
criuPath: l.CriuPath,
|
||||
cgroupManager: l.NewCgroupsManager(state.Config.Cgroups, state.CgroupPaths),
|
||||
root: containerRoot,
|
||||
created: state.Created,
|
||||
}
|
||||
c.state = &loadedState{c: c}
|
||||
if err := c.refreshState(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (l *LinuxFactory) Type() string {
|
||||
return "libcontainer"
|
||||
}
|
||||
|
||||
// StartInitialization loads a container by opening the pipe fd from the parent to read the configuration and state
|
||||
// This is a low level implementation detail of the reexec and should not be consumed externally
|
||||
func (l *LinuxFactory) StartInitialization() (err error) {
|
||||
var (
|
||||
pipefd, rootfd int
|
||||
consoleSocket *os.File
|
||||
envInitPipe = os.Getenv("_LIBCONTAINER_INITPIPE")
|
||||
envStateDir = os.Getenv("_LIBCONTAINER_STATEDIR")
|
||||
envConsole = os.Getenv("_LIBCONTAINER_CONSOLE")
|
||||
)
|
||||
|
||||
// Get the INITPIPE.
|
||||
pipefd, err = strconv.Atoi(envInitPipe)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to convert _LIBCONTAINER_INITPIPE=%s to int: %s", envInitPipe, err)
|
||||
}
|
||||
|
||||
var (
|
||||
pipe = os.NewFile(uintptr(pipefd), "pipe")
|
||||
it = initType(os.Getenv("_LIBCONTAINER_INITTYPE"))
|
||||
)
|
||||
defer pipe.Close()
|
||||
|
||||
// Only init processes have STATEDIR.
|
||||
rootfd = -1
|
||||
if it == initStandard {
|
||||
if rootfd, err = strconv.Atoi(envStateDir); err != nil {
|
||||
return fmt.Errorf("unable to convert _LIBCONTAINER_STATEDIR=%s to int: %s", envStateDir, err)
|
||||
}
|
||||
}
|
||||
|
||||
if envConsole != "" {
|
||||
console, err := strconv.Atoi(envConsole)
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to convert _LIBCONTAINER_CONSOLE=%s to int: %s", envConsole, err)
|
||||
}
|
||||
consoleSocket = os.NewFile(uintptr(console), "console-socket")
|
||||
defer consoleSocket.Close()
|
||||
}
|
||||
|
||||
// clear the current process's environment to clean any libcontainer
|
||||
// specific env vars.
|
||||
os.Clearenv()
|
||||
|
||||
defer func() {
|
||||
// We have an error during the initialization of the container's init,
|
||||
// send it back to the parent process in the form of an initError.
|
||||
if werr := utils.WriteJSON(pipe, syncT{procError}); werr != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return
|
||||
}
|
||||
if werr := utils.WriteJSON(pipe, newSystemError(err)); werr != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
return
|
||||
}
|
||||
}()
|
||||
defer func() {
|
||||
if e := recover(); e != nil {
|
||||
err = fmt.Errorf("panic from initialization: %v, %v", e, string(debug.Stack()))
|
||||
}
|
||||
}()
|
||||
|
||||
i, err := newContainerInit(it, pipe, consoleSocket, rootfd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// If Init succeeds, syscall.Exec will not return, hence none of the defers will be called.
|
||||
return i.Init()
|
||||
}
|
||||
|
||||
func (l *LinuxFactory) loadState(root, id string) (*State, error) {
|
||||
f, err := os.Open(filepath.Join(root, stateFilename))
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, newGenericError(fmt.Errorf("container %q does not exist", id), ContainerNotExists)
|
||||
}
|
||||
return nil, newGenericError(err, SystemError)
|
||||
}
|
||||
defer f.Close()
|
||||
var state *State
|
||||
if err := json.NewDecoder(f).Decode(&state); err != nil {
|
||||
return nil, newGenericError(err, SystemError)
|
||||
}
|
||||
return state, nil
|
||||
}
|
||||
|
||||
func (l *LinuxFactory) validateID(id string) error {
|
||||
if !idRegex.MatchString(id) {
|
||||
return newGenericError(fmt.Errorf("invalid id format: %v", id), InvalidIdFormat)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
92
vendor/github.com/opencontainers/runc/libcontainer/generic_error.go
generated
vendored
92
vendor/github.com/opencontainers/runc/libcontainer/generic_error.go
generated
vendored
@@ -1,92 +0,0 @@
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/stacktrace"
|
||||
)
|
||||
|
||||
var errorTemplate = template.Must(template.New("error").Parse(`Timestamp: {{.Timestamp}}
|
||||
Code: {{.ECode}}
|
||||
{{if .Message }}
|
||||
Message: {{.Message}}
|
||||
{{end}}
|
||||
Frames:{{range $i, $frame := .Stack.Frames}}
|
||||
---
|
||||
{{$i}}: {{$frame.Function}}
|
||||
Package: {{$frame.Package}}
|
||||
File: {{$frame.File}}@{{$frame.Line}}{{end}}
|
||||
`))
|
||||
|
||||
func newGenericError(err error, c ErrorCode) Error {
|
||||
if le, ok := err.(Error); ok {
|
||||
return le
|
||||
}
|
||||
gerr := &genericError{
|
||||
Timestamp: time.Now(),
|
||||
Err: err,
|
||||
ECode: c,
|
||||
Stack: stacktrace.Capture(1),
|
||||
}
|
||||
if err != nil {
|
||||
gerr.Message = err.Error()
|
||||
}
|
||||
return gerr
|
||||
}
|
||||
|
||||
func newSystemError(err error) Error {
|
||||
return createSystemError(err, "")
|
||||
}
|
||||
|
||||
func newSystemErrorWithCausef(err error, cause string, v ...interface{}) Error {
|
||||
return createSystemError(err, fmt.Sprintf(cause, v...))
|
||||
}
|
||||
|
||||
func newSystemErrorWithCause(err error, cause string) Error {
|
||||
return createSystemError(err, cause)
|
||||
}
|
||||
|
||||
// createSystemError creates the specified error with the correct number of
|
||||
// stack frames skipped. This is only to be called by the other functions for
|
||||
// formatting the error.
|
||||
func createSystemError(err error, cause string) Error {
|
||||
gerr := &genericError{
|
||||
Timestamp: time.Now(),
|
||||
Err: err,
|
||||
ECode: SystemError,
|
||||
Cause: cause,
|
||||
Stack: stacktrace.Capture(2),
|
||||
}
|
||||
if err != nil {
|
||||
gerr.Message = err.Error()
|
||||
}
|
||||
return gerr
|
||||
}
|
||||
|
||||
type genericError struct {
|
||||
Timestamp time.Time
|
||||
ECode ErrorCode
|
||||
Err error `json:"-"`
|
||||
Cause string
|
||||
Message string
|
||||
Stack stacktrace.Stacktrace
|
||||
}
|
||||
|
||||
func (e *genericError) Error() string {
|
||||
if e.Cause == "" {
|
||||
return e.Message
|
||||
}
|
||||
frame := e.Stack.Frames[0]
|
||||
return fmt.Sprintf("%s:%d: %s caused %q", frame.File, frame.Line, e.Cause, e.Message)
|
||||
}
|
||||
|
||||
func (e *genericError) Code() ErrorCode {
|
||||
return e.ECode
|
||||
}
|
||||
|
||||
func (e *genericError) Detail(w io.Writer) error {
|
||||
return errorTemplate.Execute(w, e)
|
||||
}
|
||||
502
vendor/github.com/opencontainers/runc/libcontainer/init_linux.go
generated
vendored
502
vendor/github.com/opencontainers/runc/libcontainer/init_linux.go
generated
vendored
@@ -1,502 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"strings"
|
||||
"syscall" // only for Errno
|
||||
"unsafe"
|
||||
|
||||
"github.com/Sirupsen/logrus"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
"github.com/opencontainers/runc/libcontainer/user"
|
||||
"github.com/opencontainers/runc/libcontainer/utils"
|
||||
"github.com/vishvananda/netlink"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type initType string
|
||||
|
||||
const (
|
||||
initSetns initType = "setns"
|
||||
initStandard initType = "standard"
|
||||
)
|
||||
|
||||
type pid struct {
|
||||
Pid int `json:"pid"`
|
||||
}
|
||||
|
||||
// network is an internal struct used to setup container networks.
|
||||
type network struct {
|
||||
configs.Network
|
||||
|
||||
// TempVethPeerName is a unique temporary veth peer name that was placed into
|
||||
// the container's namespace.
|
||||
TempVethPeerName string `json:"temp_veth_peer_name"`
|
||||
}
|
||||
|
||||
// initConfig is used for transferring parameters from Exec() to Init()
|
||||
type initConfig struct {
|
||||
Args []string `json:"args"`
|
||||
Env []string `json:"env"`
|
||||
Cwd string `json:"cwd"`
|
||||
Capabilities *configs.Capabilities `json:"capabilities"`
|
||||
ProcessLabel string `json:"process_label"`
|
||||
AppArmorProfile string `json:"apparmor_profile"`
|
||||
NoNewPrivileges bool `json:"no_new_privileges"`
|
||||
User string `json:"user"`
|
||||
AdditionalGroups []string `json:"additional_groups"`
|
||||
Config *configs.Config `json:"config"`
|
||||
Networks []*network `json:"network"`
|
||||
PassedFilesCount int `json:"passed_files_count"`
|
||||
ContainerId string `json:"containerid"`
|
||||
Rlimits []configs.Rlimit `json:"rlimits"`
|
||||
CreateConsole bool `json:"create_console"`
|
||||
Rootless bool `json:"rootless"`
|
||||
}
|
||||
|
||||
type initer interface {
|
||||
Init() error
|
||||
}
|
||||
|
||||
func newContainerInit(t initType, pipe *os.File, consoleSocket *os.File, stateDirFD int) (initer, error) {
|
||||
var config *initConfig
|
||||
if err := json.NewDecoder(pipe).Decode(&config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := populateProcessEnvironment(config.Env); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch t {
|
||||
case initSetns:
|
||||
return &linuxSetnsInit{
|
||||
pipe: pipe,
|
||||
consoleSocket: consoleSocket,
|
||||
config: config,
|
||||
}, nil
|
||||
case initStandard:
|
||||
return &linuxStandardInit{
|
||||
pipe: pipe,
|
||||
consoleSocket: consoleSocket,
|
||||
parentPid: unix.Getppid(),
|
||||
config: config,
|
||||
stateDirFD: stateDirFD,
|
||||
}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("unknown init type %q", t)
|
||||
}
|
||||
|
||||
// populateProcessEnvironment loads the provided environment variables into the
|
||||
// current process's environment.
|
||||
func populateProcessEnvironment(env []string) error {
|
||||
for _, pair := range env {
|
||||
p := strings.SplitN(pair, "=", 2)
|
||||
if len(p) < 2 {
|
||||
return fmt.Errorf("invalid environment '%v'", pair)
|
||||
}
|
||||
if err := os.Setenv(p[0], p[1]); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// finalizeNamespace drops the caps, sets the correct user
|
||||
// and working dir, and closes any leaked file descriptors
|
||||
// before executing the command inside the namespace
|
||||
func finalizeNamespace(config *initConfig) error {
|
||||
// Ensure that all unwanted fds we may have accidentally
|
||||
// inherited are marked close-on-exec so they stay out of the
|
||||
// container
|
||||
if err := utils.CloseExecFrom(config.PassedFilesCount + 3); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
capabilities := &configs.Capabilities{}
|
||||
if config.Capabilities != nil {
|
||||
capabilities = config.Capabilities
|
||||
} else if config.Config.Capabilities != nil {
|
||||
capabilities = config.Config.Capabilities
|
||||
}
|
||||
w, err := newContainerCapList(capabilities)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// drop capabilities in bounding set before changing user
|
||||
if err := w.ApplyBoundingSet(); err != nil {
|
||||
return err
|
||||
}
|
||||
// preserve existing capabilities while we change users
|
||||
if err := system.SetKeepCaps(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := setupUser(config); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := system.ClearKeepCaps(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := w.ApplyCaps(); err != nil {
|
||||
return err
|
||||
}
|
||||
if config.Cwd != "" {
|
||||
if err := unix.Chdir(config.Cwd); err != nil {
|
||||
return fmt.Errorf("chdir to cwd (%q) set in config.json failed: %v", config.Cwd, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setupConsole sets up the console from inside the container, and sends the
|
||||
// master pty fd to the config.Pipe (using cmsg). This is done to ensure that
|
||||
// consoles are scoped to a container properly (see runc#814 and the many
|
||||
// issues related to that). This has to be run *after* we've pivoted to the new
|
||||
// rootfs (and the users' configuration is entirely set up).
|
||||
func setupConsole(socket *os.File, config *initConfig, mount bool) error {
|
||||
defer socket.Close()
|
||||
// At this point, /dev/ptmx points to something that we would expect. We
|
||||
// used to change the owner of the slave path, but since the /dev/pts mount
// can have gid=X set (at the user's option), touching the owner of the
// slave PTY is not necessary, as the kernel will handle that for us. Note
|
||||
// however, that setupUser (specifically fixStdioPermissions) *will* change
|
||||
// the UID owner of the console to be the user the process will run as (so
|
||||
// they can actually control their console).
|
||||
console, err := newConsole()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// After we return from here, we don't need the console anymore.
|
||||
defer console.Close()
|
||||
|
||||
linuxConsole, ok := console.(*linuxConsole)
|
||||
if !ok {
|
||||
return fmt.Errorf("failed to cast console to *linuxConsole")
|
||||
}
|
||||
// Mount the console inside our rootfs.
|
||||
if mount {
|
||||
if err := linuxConsole.mount(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// While we can access console.master, using the API is a good idea.
|
||||
if err := utils.SendFd(socket, linuxConsole.File()); err != nil {
|
||||
return err
|
||||
}
|
||||
// Now, dup over all the things.
|
||||
return linuxConsole.dupStdio()
|
||||
}
|
||||
|
||||
// syncParentReady sends to the given pipe a JSON payload which indicates that
|
||||
// the init is ready to Exec the child process. It then waits for the parent to
|
||||
// indicate that it is cleared to Exec.
|
||||
func syncParentReady(pipe io.ReadWriter) error {
|
||||
// Tell parent.
|
||||
if err := writeSync(pipe, procReady); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for parent to give the all-clear.
|
||||
if err := readSync(pipe, procRun); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncParentHooks sends to the given pipe a JSON payload which indicates that
|
||||
// the parent should execute pre-start hooks. It then waits for the parent to
|
||||
// indicate that it is cleared to resume.
|
||||
func syncParentHooks(pipe io.ReadWriter) error {
|
||||
// Tell parent.
|
||||
if err := writeSync(pipe, procHooks); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Wait for parent to give the all-clear.
|
||||
if err := readSync(pipe, procResume); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// setupUser changes the groups, gid, and uid for the user inside the container
|
||||
func setupUser(config *initConfig) error {
|
||||
// Set up defaults.
|
||||
defaultExecUser := user.ExecUser{
|
||||
Uid: 0,
|
||||
Gid: 0,
|
||||
Home: "/",
|
||||
}
|
||||
|
||||
passwdPath, err := user.GetPasswdPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
groupPath, err := user.GetGroupPath()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
execUser, err := user.GetExecUserPath(config.User, &defaultExecUser, passwdPath, groupPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var addGroups []int
|
||||
if len(config.AdditionalGroups) > 0 {
|
||||
addGroups, err = user.GetAdditionalGroupsPath(config.AdditionalGroups, groupPath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if config.Rootless {
|
||||
if execUser.Uid != 0 {
|
||||
return fmt.Errorf("cannot run as a non-root user in a rootless container")
|
||||
}
|
||||
|
||||
if execUser.Gid != 0 {
|
||||
return fmt.Errorf("cannot run as a non-root group in a rootless container")
|
||||
}
|
||||
|
||||
// We cannot set any additional groups in a rootless container and thus we
|
||||
// bail if the user asked us to do so. TODO: We currently can't do this
|
||||
// earlier, but if libcontainer.Process.User was typesafe this might work.
|
||||
if len(addGroups) > 0 {
|
||||
return fmt.Errorf("cannot set any additional groups in a rootless container")
|
||||
}
|
||||
}
|
||||
|
||||
// before we change to the container's user, make sure that the process's STDIO
|
||||
// is correctly owned by the user that we are switching to.
|
||||
if err := fixStdioPermissions(config, execUser); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// This isn't allowed in an unprivileged user namespace since Linux 3.19.
|
||||
// There's nothing we can do about /etc/group entries, so we silently
|
||||
// ignore setting groups here (since the user didn't explicitly ask us to
|
||||
// set the group).
|
||||
if !config.Rootless {
|
||||
suppGroups := append(execUser.Sgids, addGroups...)
|
||||
if err := unix.Setgroups(suppGroups); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if err := system.Setgid(execUser.Gid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := system.Setuid(execUser.Uid); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if we didn't get HOME already, set it based on the user's HOME
|
||||
if envHome := os.Getenv("HOME"); envHome == "" {
|
||||
if err := os.Setenv("HOME", execUser.Home); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// fixStdioPermissions fixes the permissions of PID 1's STDIO within the container to the specified user.
|
||||
// The ownership needs to match because it is created outside of the container and needs to be
|
||||
// localized.
|
||||
func fixStdioPermissions(config *initConfig, u *user.ExecUser) error {
|
||||
var null unix.Stat_t
|
||||
if err := unix.Stat("/dev/null", &null); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, fd := range []uintptr{
|
||||
os.Stdin.Fd(),
|
||||
os.Stderr.Fd(),
|
||||
os.Stdout.Fd(),
|
||||
} {
|
||||
var s unix.Stat_t
|
||||
if err := unix.Fstat(int(fd), &s); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Skip chown of /dev/null if it was used as one of the STDIO fds.
|
||||
if s.Rdev == null.Rdev {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip chown if s.Gid is actually an unmapped gid in the host. While
|
||||
// this is a bit dodgy if it just so happens that the console _is_
|
||||
// owned by overflow_gid, there's no way for us to disambiguate this as
|
||||
// a userspace program.
|
||||
if _, err := config.Config.HostGID(int(s.Gid)); err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
// We only change the uid owner (as it is possible for the mount to
|
||||
// prefer a different gid, and there's no reason for us to change it).
|
||||
// The reason why we don't just leave the default uid=X mount setup is
|
||||
// that users expect to be able to actually use their console. Without
|
||||
// this code, you couldn't effectively run as a non-root user inside a
|
||||
// container and also have a console set up.
|
||||
if err := unix.Fchown(int(fd), u.Uid, int(s.Gid)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// setupNetwork sets up and initializes any network interface inside the container.
|
||||
func setupNetwork(config *initConfig) error {
|
||||
for _, config := range config.Networks {
|
||||
strategy, err := getStrategy(config.Type)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := strategy.initialize(config); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupRoute(config *configs.Config) error {
|
||||
for _, config := range config.Routes {
|
||||
_, dst, err := net.ParseCIDR(config.Destination)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
src := net.ParseIP(config.Source)
|
||||
if src == nil {
|
||||
return fmt.Errorf("Invalid source for route: %s", config.Source)
|
||||
}
|
||||
gw := net.ParseIP(config.Gateway)
|
||||
if gw == nil {
|
||||
return fmt.Errorf("Invalid gateway for route: %s", config.Gateway)
|
||||
}
|
||||
l, err := netlink.LinkByName(config.InterfaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
route := &netlink.Route{
|
||||
Scope: netlink.SCOPE_UNIVERSE,
|
||||
Dst: dst,
|
||||
Src: src,
|
||||
Gw: gw,
|
||||
LinkIndex: l.Attrs().Index,
|
||||
}
|
||||
if err := netlink.RouteAdd(route); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupRlimits(limits []configs.Rlimit, pid int) error {
|
||||
for _, rlimit := range limits {
|
||||
if err := system.Prlimit(pid, rlimit.Type, unix.Rlimit{Max: rlimit.Hard, Cur: rlimit.Soft}); err != nil {
|
||||
return fmt.Errorf("error setting rlimit type %v: %v", rlimit.Type, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const _P_PID = 1
|
||||
|
||||
type siginfo struct {
|
||||
si_signo int32
|
||||
si_errno int32
|
||||
si_code int32
|
||||
// below here is a union; si_pid is the only field we use
|
||||
si_pid int32
|
||||
// Pad to 128 bytes as detailed in blockUntilWaitable
|
||||
pad [96]byte
|
||||
}
|
||||
|
||||
// isWaitable returns true if the process has exited false otherwise.
|
||||
// It's based on blockUntilWaitable in src/os/wait_waitid.go
|
||||
func isWaitable(pid int) (bool, error) {
|
||||
si := &siginfo{}
|
||||
_, _, e := unix.Syscall6(unix.SYS_WAITID, _P_PID, uintptr(pid), uintptr(unsafe.Pointer(si)), unix.WEXITED|unix.WNOWAIT|unix.WNOHANG, 0, 0)
|
||||
if e != 0 {
|
||||
return false, os.NewSyscallError("waitid", e)
|
||||
}
|
||||
|
||||
return si.si_pid != 0, nil
|
||||
}
|
||||
|
||||
// isNoChildren returns true if err represents a unix.ECHILD (formerly syscall.ECHILD) false otherwise
|
||||
func isNoChildren(err error) bool {
|
||||
switch err := err.(type) {
|
||||
case syscall.Errno:
|
||||
if err == unix.ECHILD {
|
||||
return true
|
||||
}
|
||||
case *os.SyscallError:
|
||||
if err.Err == unix.ECHILD {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// signalAllProcesses freezes then iterates over all the processes inside the
|
||||
// manager's cgroups sending the signal s to them.
|
||||
// If s is SIGKILL then it will wait for each process to exit.
|
||||
// For all other signals it will check if the process is ready to report its
|
||||
// exit status and only if it is will a wait be performed.
|
||||
func signalAllProcesses(m cgroups.Manager, s os.Signal) error {
|
||||
var procs []*os.Process
|
||||
if err := m.Freeze(configs.Frozen); err != nil {
|
||||
logrus.Warn(err)
|
||||
}
|
||||
pids, err := m.GetAllPids()
|
||||
if err != nil {
|
||||
m.Freeze(configs.Thawed)
|
||||
return err
|
||||
}
|
||||
for _, pid := range pids {
|
||||
p, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
logrus.Warn(err)
|
||||
continue
|
||||
}
|
||||
procs = append(procs, p)
|
||||
if err := p.Signal(s); err != nil {
|
||||
logrus.Warn(err)
|
||||
}
|
||||
}
|
||||
if err := m.Freeze(configs.Thawed); err != nil {
|
||||
logrus.Warn(err)
|
||||
}
|
||||
|
||||
for _, p := range procs {
|
||||
if s != unix.SIGKILL {
|
||||
if ok, err := isWaitable(p.Pid); err != nil {
|
||||
if !isNoChildren(err) {
|
||||
logrus.Warn("signalAllProcesses: ", p.Pid, err)
|
||||
}
|
||||
continue
|
||||
} else if !ok {
|
||||
// Not ready to report so don't wait
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := p.Wait(); err != nil {
|
||||
if !isNoChildren(err) {
|
||||
logrus.Warn("wait: ", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
87
vendor/github.com/opencontainers/runc/libcontainer/message_linux.go
generated
vendored
87
vendor/github.com/opencontainers/runc/libcontainer/message_linux.go
generated
vendored
@@ -1,87 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"github.com/vishvananda/netlink/nl"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
// list of known message types we want to send to bootstrap program
|
||||
// The number is randomly chosen to not conflict with known netlink types
|
||||
const (
|
||||
InitMsg uint16 = 62000
|
||||
CloneFlagsAttr uint16 = 27281
|
||||
NsPathsAttr uint16 = 27282
|
||||
UidmapAttr uint16 = 27283
|
||||
GidmapAttr uint16 = 27284
|
||||
SetgroupAttr uint16 = 27285
|
||||
OomScoreAdjAttr uint16 = 27286
|
||||
RootlessAttr uint16 = 27287
|
||||
)
|
||||
|
||||
type Int32msg struct {
|
||||
Type uint16
|
||||
Value uint32
|
||||
}
|
||||
|
||||
// Serialize serializes the message.
|
||||
// Int32msg has the following representation
|
||||
// | nlattr len | nlattr type |
|
||||
// | uint32 value |
|
||||
func (msg *Int32msg) Serialize() []byte {
|
||||
buf := make([]byte, msg.Len())
|
||||
native := nl.NativeEndian()
|
||||
native.PutUint16(buf[0:2], uint16(msg.Len()))
|
||||
native.PutUint16(buf[2:4], msg.Type)
|
||||
native.PutUint32(buf[4:8], msg.Value)
|
||||
return buf
|
||||
}
|
||||
|
||||
func (msg *Int32msg) Len() int {
|
||||
return unix.NLA_HDRLEN + 4
|
||||
}
|
||||
|
||||
// Bytemsg has the following representation
|
||||
// | nlattr len | nlattr type |
|
||||
// | value | pad |
|
||||
type Bytemsg struct {
|
||||
Type uint16
|
||||
Value []byte
|
||||
}
|
||||
|
||||
func (msg *Bytemsg) Serialize() []byte {
|
||||
l := msg.Len()
|
||||
buf := make([]byte, (l+unix.NLA_ALIGNTO-1) & ^(unix.NLA_ALIGNTO-1))
|
||||
native := nl.NativeEndian()
|
||||
native.PutUint16(buf[0:2], uint16(l))
|
||||
native.PutUint16(buf[2:4], msg.Type)
|
||||
copy(buf[4:], msg.Value)
|
||||
return buf
|
||||
}
|
||||
|
||||
func (msg *Bytemsg) Len() int {
|
||||
return unix.NLA_HDRLEN + len(msg.Value) + 1 // null-terminated
|
||||
}
|
||||
|
||||
type Boolmsg struct {
|
||||
Type uint16
|
||||
Value bool
|
||||
}
|
||||
|
||||
func (msg *Boolmsg) Serialize() []byte {
|
||||
buf := make([]byte, msg.Len())
|
||||
native := nl.NativeEndian()
|
||||
native.PutUint16(buf[0:2], uint16(msg.Len()))
|
||||
native.PutUint16(buf[2:4], msg.Type)
|
||||
if msg.Value {
|
||||
buf[4] = 1
|
||||
} else {
|
||||
buf[4] = 0
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
func (msg *Boolmsg) Len() int {
|
||||
return unix.NLA_HDRLEN + 1
|
||||
}
|
||||
259
vendor/github.com/opencontainers/runc/libcontainer/network_linux.go
generated
vendored
259
vendor/github.com/opencontainers/runc/libcontainer/network_linux.go
generated
vendored
@@ -1,259 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/utils"
|
||||
"github.com/vishvananda/netlink"
|
||||
)
|
||||
|
||||
var strategies = map[string]networkStrategy{
|
||||
"veth": &veth{},
|
||||
"loopback": &loopback{},
|
||||
}
|
||||
|
||||
// networkStrategy represents a specific network configuration for
|
||||
// a container's networking stack
|
||||
type networkStrategy interface {
|
||||
create(*network, int) error
|
||||
initialize(*network) error
|
||||
detach(*configs.Network) error
|
||||
attach(*configs.Network) error
|
||||
}
|
||||
|
||||
// getStrategy returns the specific network strategy for the
|
||||
// provided type.
|
||||
func getStrategy(tpe string) (networkStrategy, error) {
|
||||
s, exists := strategies[tpe]
|
||||
if !exists {
|
||||
return nil, fmt.Errorf("unknown strategy type %q", tpe)
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// Returns the network statistics for the network interfaces represented by the NetworkRuntimeInfo.
|
||||
func getNetworkInterfaceStats(interfaceName string) (*NetworkInterface, error) {
|
||||
out := &NetworkInterface{Name: interfaceName}
|
||||
// This can happen if the network runtime information is missing - possible if the
|
||||
// container was created by an old version of libcontainer.
|
||||
if interfaceName == "" {
|
||||
return out, nil
|
||||
}
|
||||
type netStatsPair struct {
|
||||
// Where to write the output.
|
||||
Out *uint64
|
||||
// The network stats file to read.
|
||||
File string
|
||||
}
|
||||
// Ingress for host veth is from the container. Hence tx_bytes stat on the host veth is actually number of bytes received by the container.
|
||||
netStats := []netStatsPair{
|
||||
{Out: &out.RxBytes, File: "tx_bytes"},
|
||||
{Out: &out.RxPackets, File: "tx_packets"},
|
||||
{Out: &out.RxErrors, File: "tx_errors"},
|
||||
{Out: &out.RxDropped, File: "tx_dropped"},
|
||||
|
||||
{Out: &out.TxBytes, File: "rx_bytes"},
|
||||
{Out: &out.TxPackets, File: "rx_packets"},
|
||||
{Out: &out.TxErrors, File: "rx_errors"},
|
||||
{Out: &out.TxDropped, File: "rx_dropped"},
|
||||
}
|
||||
for _, netStat := range netStats {
|
||||
data, err := readSysfsNetworkStats(interfaceName, netStat.File)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
*(netStat.Out) = data
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// Reads the specified statistics available under /sys/class/net/<EthInterface>/statistics
|
||||
func readSysfsNetworkStats(ethInterface, statsFile string) (uint64, error) {
|
||||
data, err := ioutil.ReadFile(filepath.Join("/sys/class/net", ethInterface, "statistics", statsFile))
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return strconv.ParseUint(strings.TrimSpace(string(data)), 10, 64)
|
||||
}
|
||||
|
||||
// loopback is a network strategy that provides a basic loopback device
|
||||
type loopback struct {
|
||||
}
|
||||
|
||||
func (l *loopback) create(n *network, nspid int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopback) initialize(config *network) error {
|
||||
return netlink.LinkSetUp(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: "lo"}})
|
||||
}
|
||||
|
||||
func (l *loopback) attach(n *configs.Network) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *loopback) detach(n *configs.Network) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
// veth is a network strategy that uses a bridge and creates
|
||||
// a veth pair, one that is attached to the bridge on the host and the other
|
||||
// is placed inside the container's namespace
|
||||
type veth struct {
|
||||
}
|
||||
|
||||
func (v *veth) detach(n *configs.Network) (err error) {
|
||||
return netlink.LinkSetMaster(&netlink.Device{LinkAttrs: netlink.LinkAttrs{Name: n.HostInterfaceName}}, nil)
|
||||
}
|
||||
|
||||
// attach a container network interface to an external network
|
||||
func (v *veth) attach(n *configs.Network) (err error) {
|
||||
brl, err := netlink.LinkByName(n.Bridge)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
br, ok := brl.(*netlink.Bridge)
|
||||
if !ok {
|
||||
return fmt.Errorf("Wrong device type %T", brl)
|
||||
}
|
||||
host, err := netlink.LinkByName(n.HostInterfaceName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := netlink.LinkSetMaster(host, br); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := netlink.LinkSetMTU(host, n.Mtu); err != nil {
|
||||
return err
|
||||
}
|
||||
if n.HairpinMode {
|
||||
if err := netlink.LinkSetHairpin(host, true); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := netlink.LinkSetUp(host); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (v *veth) create(n *network, nspid int) (err error) {
|
||||
tmpName, err := v.generateTempPeerName()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n.TempVethPeerName = tmpName
|
||||
if n.Bridge == "" {
|
||||
return fmt.Errorf("bridge is not specified")
|
||||
}
|
||||
veth := &netlink.Veth{
|
||||
LinkAttrs: netlink.LinkAttrs{
|
||||
Name: n.HostInterfaceName,
|
||||
TxQLen: n.TxQueueLen,
|
||||
},
|
||||
PeerName: n.TempVethPeerName,
|
||||
}
|
||||
if err := netlink.LinkAdd(veth); err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
netlink.LinkDel(veth)
|
||||
}
|
||||
}()
|
||||
if err := v.attach(&n.Network); err != nil {
|
||||
return err
|
||||
}
|
||||
child, err := netlink.LinkByName(n.TempVethPeerName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return netlink.LinkSetNsPid(child, nspid)
|
||||
}
|
||||
|
||||
func (v *veth) generateTempPeerName() (string, error) {
|
||||
return utils.GenerateRandomName("veth", 7)
|
||||
}
|
||||
|
||||
func (v *veth) initialize(config *network) error {
|
||||
peer := config.TempVethPeerName
|
||||
if peer == "" {
|
||||
return fmt.Errorf("peer is not specified")
|
||||
}
|
||||
child, err := netlink.LinkByName(peer)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := netlink.LinkSetDown(child); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := netlink.LinkSetName(child, config.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
// get the interface again after we changed the name as the index also changes.
|
||||
if child, err = netlink.LinkByName(config.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
if config.MacAddress != "" {
|
||||
mac, err := net.ParseMAC(config.MacAddress)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := netlink.LinkSetHardwareAddr(child, mac); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
ip, err := netlink.ParseAddr(config.Address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := netlink.AddrAdd(child, ip); err != nil {
|
||||
return err
|
||||
}
|
||||
if config.IPv6Address != "" {
|
||||
ip6, err := netlink.ParseAddr(config.IPv6Address)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := netlink.AddrAdd(child, ip6); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := netlink.LinkSetMTU(child, config.Mtu); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := netlink.LinkSetUp(child); err != nil {
|
||||
return err
|
||||
}
|
||||
if config.Gateway != "" {
|
||||
gw := net.ParseIP(config.Gateway)
|
||||
if err := netlink.RouteAdd(&netlink.Route{
|
||||
Scope: netlink.SCOPE_UNIVERSE,
|
||||
LinkIndex: child.Attrs().Index,
|
||||
Gw: gw,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if config.IPv6Gateway != "" {
|
||||
gw := net.ParseIP(config.IPv6Gateway)
|
||||
if err := netlink.RouteAdd(&netlink.Route{
|
||||
Scope: netlink.SCOPE_UNIVERSE,
|
||||
LinkIndex: child.Attrs().Index,
|
||||
Gw: gw,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
90
vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go
generated
vendored
90
vendor/github.com/opencontainers/runc/libcontainer/notify_linux.go
generated
vendored
@@ -1,90 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const oomCgroupName = "memory"
|
||||
|
||||
type PressureLevel uint
|
||||
|
||||
const (
|
||||
LowPressure PressureLevel = iota
|
||||
MediumPressure
|
||||
CriticalPressure
|
||||
)
|
||||
|
||||
func registerMemoryEvent(cgDir string, evName string, arg string) (<-chan struct{}, error) {
|
||||
evFile, err := os.Open(filepath.Join(cgDir, evName))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fd, _, syserr := unix.RawSyscall(unix.SYS_EVENTFD2, 0, unix.FD_CLOEXEC, 0)
|
||||
if syserr != 0 {
|
||||
evFile.Close()
|
||||
return nil, syserr
|
||||
}
|
||||
|
||||
eventfd := os.NewFile(fd, "eventfd")
|
||||
|
||||
eventControlPath := filepath.Join(cgDir, "cgroup.event_control")
|
||||
data := fmt.Sprintf("%d %d %s", eventfd.Fd(), evFile.Fd(), arg)
|
||||
if err := ioutil.WriteFile(eventControlPath, []byte(data), 0700); err != nil {
|
||||
eventfd.Close()
|
||||
evFile.Close()
|
||||
return nil, err
|
||||
}
|
||||
ch := make(chan struct{})
|
||||
go func() {
|
||||
defer func() {
|
||||
close(ch)
|
||||
eventfd.Close()
|
||||
evFile.Close()
|
||||
}()
|
||||
buf := make([]byte, 8)
|
||||
for {
|
||||
if _, err := eventfd.Read(buf); err != nil {
|
||||
return
|
||||
}
|
||||
// When a cgroup is destroyed, an event is sent to eventfd.
|
||||
// So if the control path is gone, return instead of notifying.
|
||||
if _, err := os.Lstat(eventControlPath); os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
ch <- struct{}{}
|
||||
}
|
||||
}()
|
||||
return ch, nil
|
||||
}
|
||||
|
||||
// notifyOnOOM returns channel on which you can expect event about OOM,
|
||||
// if process died without OOM this channel will be closed.
|
||||
func notifyOnOOM(paths map[string]string) (<-chan struct{}, error) {
|
||||
dir := paths[oomCgroupName]
|
||||
if dir == "" {
|
||||
return nil, fmt.Errorf("path %q missing", oomCgroupName)
|
||||
}
|
||||
|
||||
return registerMemoryEvent(dir, "memory.oom_control", "")
|
||||
}
|
||||
|
||||
func notifyMemoryPressure(paths map[string]string, level PressureLevel) (<-chan struct{}, error) {
|
||||
dir := paths[oomCgroupName]
|
||||
if dir == "" {
|
||||
return nil, fmt.Errorf("path %q missing", oomCgroupName)
|
||||
}
|
||||
|
||||
if level > CriticalPressure {
|
||||
return nil, fmt.Errorf("invalid pressure level %d", level)
|
||||
}
|
||||
|
||||
levelStr := []string{"low", "medium", "critical"}[level]
|
||||
return registerMemoryEvent(dir, "memory.pressure_level", levelStr)
|
||||
}
|
||||
106
vendor/github.com/opencontainers/runc/libcontainer/process.go
generated
vendored
106
vendor/github.com/opencontainers/runc/libcontainer/process.go
generated
vendored
@@ -1,106 +0,0 @@
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"os"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
)
|
||||
|
||||
type processOperations interface {
|
||||
wait() (*os.ProcessState, error)
|
||||
signal(sig os.Signal) error
|
||||
pid() int
|
||||
}
|
||||
|
||||
// Process specifies the configuration and IO for a process inside
|
||||
// a container.
|
||||
type Process struct {
|
||||
// The command to be run followed by any arguments.
|
||||
Args []string
|
||||
|
||||
// Env specifies the environment variables for the process.
|
||||
Env []string
|
||||
|
||||
// User will set the uid and gid of the executing process running inside the container
|
||||
// local to the container's user and group configuration.
|
||||
User string
|
||||
|
||||
// AdditionalGroups specifies the gids that should be added to supplementary groups
|
||||
// in addition to those that the user belongs to.
|
||||
AdditionalGroups []string
|
||||
|
||||
// Cwd will change the processes current working directory inside the container's rootfs.
|
||||
Cwd string
|
||||
|
||||
// Stdin is a pointer to a reader which provides the standard input stream.
|
||||
Stdin io.Reader
|
||||
|
||||
// Stdout is a pointer to a writer which receives the standard output stream.
|
||||
Stdout io.Writer
|
||||
|
||||
// Stderr is a pointer to a writer which receives the standard error stream.
|
||||
Stderr io.Writer
|
||||
|
||||
// ExtraFiles specifies additional open files to be inherited by the container
|
||||
ExtraFiles []*os.File
|
||||
|
||||
// Capabilities specify the capabilities to keep when executing the process inside the container
|
||||
// All capabilities not specified will be dropped from the processes capability mask
|
||||
Capabilities *configs.Capabilities
|
||||
|
||||
// AppArmorProfile specifies the profile to apply to the process and is
|
||||
// changed at the time the process is execed
|
||||
AppArmorProfile string
|
||||
|
||||
// Label specifies the label to apply to the process. It is commonly used by selinux
|
||||
Label string
|
||||
|
||||
// NoNewPrivileges controls whether processes can gain additional privileges.
|
||||
NoNewPrivileges *bool
|
||||
|
||||
// Rlimits specifies the resource limits, such as max open files, to set in the container
|
||||
// If Rlimits are not set, the container will inherit rlimits from the parent process
|
||||
Rlimits []configs.Rlimit
|
||||
|
||||
// ConsoleSocket provides the masterfd console.
|
||||
ConsoleSocket *os.File
|
||||
|
||||
ops processOperations
|
||||
}
|
||||
|
||||
// Wait waits for the process to exit.
|
||||
// Wait releases any resources associated with the Process
|
||||
func (p Process) Wait() (*os.ProcessState, error) {
|
||||
if p.ops == nil {
|
||||
return nil, newGenericError(fmt.Errorf("invalid process"), NoProcessOps)
|
||||
}
|
||||
return p.ops.wait()
|
||||
}
|
||||
|
||||
// Pid returns the process ID
|
||||
func (p Process) Pid() (int, error) {
|
||||
// math.MinInt32 is returned here, because it's invalid value
|
||||
// for the kill() system call.
|
||||
if p.ops == nil {
|
||||
return math.MinInt32, newGenericError(fmt.Errorf("invalid process"), NoProcessOps)
|
||||
}
|
||||
return p.ops.pid(), nil
|
||||
}
|
||||
|
||||
// Signal sends a signal to the Process.
|
||||
func (p Process) Signal(sig os.Signal) error {
|
||||
if p.ops == nil {
|
||||
return newGenericError(fmt.Errorf("invalid process"), NoProcessOps)
|
||||
}
|
||||
return p.ops.signal(sig)
|
||||
}
|
||||
|
||||
// IO holds the process's STDIO
|
||||
type IO struct {
|
||||
Stdin io.WriteCloser
|
||||
Stdout io.ReadCloser
|
||||
Stderr io.ReadCloser
|
||||
}
|
||||
491
vendor/github.com/opencontainers/runc/libcontainer/process_linux.go
generated
vendored
491
vendor/github.com/opencontainers/runc/libcontainer/process_linux.go
generated
vendored
@@ -1,491 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"syscall" // only for Signal
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
"github.com/opencontainers/runc/libcontainer/utils"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type parentProcess interface {
|
||||
// pid returns the pid for the running process.
|
||||
pid() int
|
||||
|
||||
// start starts the process execution.
|
||||
start() error
|
||||
|
||||
// send a SIGKILL to the process and wait for the exit.
|
||||
terminate() error
|
||||
|
||||
// wait waits on the process returning the process state.
|
||||
wait() (*os.ProcessState, error)
|
||||
|
||||
// startTime returns the process start time.
|
||||
startTime() (string, error)
|
||||
|
||||
signal(os.Signal) error
|
||||
|
||||
externalDescriptors() []string
|
||||
|
||||
setExternalDescriptors(fds []string)
|
||||
}
|
||||
|
||||
type setnsProcess struct {
|
||||
cmd *exec.Cmd
|
||||
parentPipe *os.File
|
||||
childPipe *os.File
|
||||
cgroupPaths map[string]string
|
||||
config *initConfig
|
||||
fds []string
|
||||
process *Process
|
||||
bootstrapData io.Reader
|
||||
}
|
||||
|
||||
func (p *setnsProcess) startTime() (string, error) {
|
||||
return system.GetProcessStartTime(p.pid())
|
||||
}
|
||||
|
||||
func (p *setnsProcess) signal(sig os.Signal) error {
|
||||
s, ok := sig.(syscall.Signal)
|
||||
if !ok {
|
||||
return errors.New("os: unsupported signal type")
|
||||
}
|
||||
return unix.Kill(p.pid(), s)
|
||||
}
|
||||
|
||||
func (p *setnsProcess) start() (err error) {
|
||||
defer p.parentPipe.Close()
|
||||
err = p.cmd.Start()
|
||||
p.childPipe.Close()
|
||||
if err != nil {
|
||||
return newSystemErrorWithCause(err, "starting setns process")
|
||||
}
|
||||
if p.bootstrapData != nil {
|
||||
if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
|
||||
return newSystemErrorWithCause(err, "copying bootstrap data to pipe")
|
||||
}
|
||||
}
|
||||
if err = p.execSetns(); err != nil {
|
||||
return newSystemErrorWithCause(err, "executing setns process")
|
||||
}
|
||||
// We can't join cgroups if we're in a rootless container.
|
||||
if !p.config.Rootless && len(p.cgroupPaths) > 0 {
|
||||
if err := cgroups.EnterPid(p.cgroupPaths, p.pid()); err != nil {
|
||||
return newSystemErrorWithCausef(err, "adding pid %d to cgroups", p.pid())
|
||||
}
|
||||
}
|
||||
// set rlimits, this has to be done here because we lose permissions
|
||||
// to raise the limits once we enter a user-namespace
|
||||
if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
|
||||
return newSystemErrorWithCause(err, "setting rlimits for process")
|
||||
}
|
||||
if err := utils.WriteJSON(p.parentPipe, p.config); err != nil {
|
||||
return newSystemErrorWithCause(err, "writing config to pipe")
|
||||
}
|
||||
|
||||
ierr := parseSync(p.parentPipe, func(sync *syncT) error {
|
||||
switch sync.Type {
|
||||
case procReady:
|
||||
// This shouldn't happen.
|
||||
panic("unexpected procReady in setns")
|
||||
case procHooks:
|
||||
// This shouldn't happen.
|
||||
panic("unexpected procHooks in setns")
|
||||
default:
|
||||
return newSystemError(fmt.Errorf("invalid JSON payload from child"))
|
||||
}
|
||||
})
|
||||
|
||||
if err := unix.Shutdown(int(p.parentPipe.Fd()), unix.SHUT_WR); err != nil {
|
||||
return newSystemErrorWithCause(err, "calling shutdown on init pipe")
|
||||
}
|
||||
// Must be done after Shutdown so the child will exit and we can wait for it.
|
||||
if ierr != nil {
|
||||
p.wait()
|
||||
return ierr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// execSetns runs the process that executes C code to perform the setns calls
|
||||
// because setns support requires the C process to fork off a child and perform the setns
|
||||
// before the go runtime boots, we wait on the process to die and receive the child's pid
|
||||
// over the provided pipe.
|
||||
func (p *setnsProcess) execSetns() error {
|
||||
status, err := p.cmd.Process.Wait()
|
||||
if err != nil {
|
||||
p.cmd.Wait()
|
||||
return newSystemErrorWithCause(err, "waiting on setns process to finish")
|
||||
}
|
||||
if !status.Success() {
|
||||
p.cmd.Wait()
|
||||
return newSystemError(&exec.ExitError{ProcessState: status})
|
||||
}
|
||||
var pid *pid
|
||||
if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
|
||||
p.cmd.Wait()
|
||||
return newSystemErrorWithCause(err, "reading pid from init pipe")
|
||||
}
|
||||
process, err := os.FindProcess(pid.Pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.cmd.Process = process
|
||||
p.process.ops = p
|
||||
return nil
|
||||
}
|
||||
|
||||
// terminate sends a SIGKILL to the forked process for the setns routine then waits to
|
||||
// avoid the process becoming a zombie.
|
||||
func (p *setnsProcess) terminate() error {
|
||||
if p.cmd.Process == nil {
|
||||
return nil
|
||||
}
|
||||
err := p.cmd.Process.Kill()
|
||||
if _, werr := p.wait(); err == nil {
|
||||
err = werr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *setnsProcess) wait() (*os.ProcessState, error) {
|
||||
err := p.cmd.Wait()
|
||||
|
||||
// Return actual ProcessState even on Wait error
|
||||
return p.cmd.ProcessState, err
|
||||
}
|
||||
|
||||
func (p *setnsProcess) pid() int {
|
||||
return p.cmd.Process.Pid
|
||||
}
|
||||
|
||||
func (p *setnsProcess) externalDescriptors() []string {
|
||||
return p.fds
|
||||
}
|
||||
|
||||
func (p *setnsProcess) setExternalDescriptors(newFds []string) {
|
||||
p.fds = newFds
|
||||
}
|
||||
|
||||
type initProcess struct {
|
||||
cmd *exec.Cmd
|
||||
parentPipe *os.File
|
||||
childPipe *os.File
|
||||
config *initConfig
|
||||
manager cgroups.Manager
|
||||
container *linuxContainer
|
||||
fds []string
|
||||
process *Process
|
||||
bootstrapData io.Reader
|
||||
sharePidns bool
|
||||
rootDir *os.File
|
||||
}
|
||||
|
||||
func (p *initProcess) pid() int {
|
||||
return p.cmd.Process.Pid
|
||||
}
|
||||
|
||||
func (p *initProcess) externalDescriptors() []string {
|
||||
return p.fds
|
||||
}
|
||||
|
||||
// execSetns runs the process that executes C code to perform the setns calls
|
||||
// because setns support requires the C process to fork off a child and perform the setns
|
||||
// before the go runtime boots, we wait on the process to die and receive the child's pid
|
||||
// over the provided pipe.
|
||||
// This is called by initProcess.start function
|
||||
func (p *initProcess) execSetns() error {
|
||||
status, err := p.cmd.Process.Wait()
|
||||
if err != nil {
|
||||
p.cmd.Wait()
|
||||
return err
|
||||
}
|
||||
if !status.Success() {
|
||||
p.cmd.Wait()
|
||||
return &exec.ExitError{ProcessState: status}
|
||||
}
|
||||
var pid *pid
|
||||
if err := json.NewDecoder(p.parentPipe).Decode(&pid); err != nil {
|
||||
p.cmd.Wait()
|
||||
return err
|
||||
}
|
||||
process, err := os.FindProcess(pid.Pid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.cmd.Process = process
|
||||
p.process.ops = p
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *initProcess) start() error {
|
||||
defer p.parentPipe.Close()
|
||||
err := p.cmd.Start()
|
||||
p.process.ops = p
|
||||
p.childPipe.Close()
|
||||
p.rootDir.Close()
|
||||
if err != nil {
|
||||
p.process.ops = nil
|
||||
return newSystemErrorWithCause(err, "starting init process command")
|
||||
}
|
||||
if _, err := io.Copy(p.parentPipe, p.bootstrapData); err != nil {
|
||||
return newSystemErrorWithCause(err, "copying bootstrap data to pipe")
|
||||
}
|
||||
if err := p.execSetns(); err != nil {
|
||||
return newSystemErrorWithCause(err, "running exec setns process for init")
|
||||
}
|
||||
// Save the standard descriptor names before the container process
|
||||
// can potentially move them (e.g., via dup2()). If we don't do this now,
|
||||
// we won't know at checkpoint time which file descriptor to look up.
|
||||
fds, err := getPipeFds(p.pid())
|
||||
if err != nil {
|
||||
return newSystemErrorWithCausef(err, "getting pipe fds for pid %d", p.pid())
|
||||
}
|
||||
p.setExternalDescriptors(fds)
|
||||
// Do this before syncing with child so that no children can escape the
|
||||
// cgroup. We don't need to worry about not doing this and not being root
|
||||
// because we'd be using the rootless cgroup manager in that case.
|
||||
if err := p.manager.Apply(p.pid()); err != nil {
|
||||
return newSystemErrorWithCause(err, "applying cgroup configuration for process")
|
||||
}
|
||||
defer func() {
|
||||
if err != nil {
|
||||
// TODO: should not be the responsibility to call here
|
||||
p.manager.Destroy()
|
||||
}
|
||||
}()
|
||||
if err := p.createNetworkInterfaces(); err != nil {
|
||||
return newSystemErrorWithCause(err, "creating network interfaces")
|
||||
}
|
||||
if err := p.sendConfig(); err != nil {
|
||||
return newSystemErrorWithCause(err, "sending config to init process")
|
||||
}
|
||||
var (
|
||||
sentRun bool
|
||||
sentResume bool
|
||||
)
|
||||
|
||||
ierr := parseSync(p.parentPipe, func(sync *syncT) error {
|
||||
switch sync.Type {
|
||||
case procReady:
|
||||
// set rlimits, this has to be done here because we lose permissions
|
||||
// to raise the limits once we enter a user-namespace
|
||||
if err := setupRlimits(p.config.Rlimits, p.pid()); err != nil {
|
||||
return newSystemErrorWithCause(err, "setting rlimits for ready process")
|
||||
}
|
||||
// call prestart hooks
|
||||
if !p.config.Config.Namespaces.Contains(configs.NEWNS) {
|
||||
// Setup cgroup before prestart hook, so that the prestart hook could apply cgroup permissions.
|
||||
if err := p.manager.Set(p.config.Config); err != nil {
|
||||
return newSystemErrorWithCause(err, "setting cgroup config for ready process")
|
||||
}
|
||||
|
||||
if p.config.Config.Hooks != nil {
|
||||
s := configs.HookState{
|
||||
Version: p.container.config.Version,
|
||||
ID: p.container.id,
|
||||
Pid: p.pid(),
|
||||
Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"),
|
||||
}
|
||||
for i, hook := range p.config.Config.Hooks.Prestart {
|
||||
if err := hook.Run(s); err != nil {
|
||||
return newSystemErrorWithCausef(err, "running prestart hook %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Sync with child.
|
||||
if err := writeSync(p.parentPipe, procRun); err != nil {
|
||||
return newSystemErrorWithCause(err, "writing syncT 'run'")
|
||||
}
|
||||
sentRun = true
|
||||
case procHooks:
|
||||
// Setup cgroup before prestart hook, so that the prestart hook could apply cgroup permissions.
|
||||
if err := p.manager.Set(p.config.Config); err != nil {
|
||||
return newSystemErrorWithCause(err, "setting cgroup config for procHooks process")
|
||||
}
|
||||
if p.config.Config.Hooks != nil {
|
||||
s := configs.HookState{
|
||||
Version: p.container.config.Version,
|
||||
ID: p.container.id,
|
||||
Pid: p.pid(),
|
||||
Bundle: utils.SearchLabels(p.config.Config.Labels, "bundle"),
|
||||
}
|
||||
for i, hook := range p.config.Config.Hooks.Prestart {
|
||||
if err := hook.Run(s); err != nil {
|
||||
return newSystemErrorWithCausef(err, "running prestart hook %d", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Sync with child.
|
||||
if err := writeSync(p.parentPipe, procResume); err != nil {
|
||||
return newSystemErrorWithCause(err, "writing syncT 'resume'")
|
||||
}
|
||||
sentResume = true
|
||||
default:
|
||||
return newSystemError(fmt.Errorf("invalid JSON payload from child"))
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
if !sentRun {
|
||||
return newSystemErrorWithCause(ierr, "container init")
|
||||
}
|
||||
if p.config.Config.Namespaces.Contains(configs.NEWNS) && !sentResume {
|
||||
return newSystemError(fmt.Errorf("could not synchronise after executing prestart hooks with container process"))
|
||||
}
|
||||
if err := unix.Shutdown(int(p.parentPipe.Fd()), unix.SHUT_WR); err != nil {
|
||||
return newSystemErrorWithCause(err, "shutting down init pipe")
|
||||
}
|
||||
|
||||
// Must be done after Shutdown so the child will exit and we can wait for it.
|
||||
if ierr != nil {
|
||||
p.wait()
|
||||
return ierr
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *initProcess) wait() (*os.ProcessState, error) {
|
||||
err := p.cmd.Wait()
|
||||
if err != nil {
|
||||
return p.cmd.ProcessState, err
|
||||
}
|
||||
// we should kill all processes in cgroup when init is died if we use host PID namespace
|
||||
if p.sharePidns {
|
||||
signalAllProcesses(p.manager, unix.SIGKILL)
|
||||
}
|
||||
return p.cmd.ProcessState, nil
|
||||
}
|
||||
|
||||
func (p *initProcess) terminate() error {
|
||||
if p.cmd.Process == nil {
|
||||
return nil
|
||||
}
|
||||
err := p.cmd.Process.Kill()
|
||||
if _, werr := p.wait(); err == nil {
|
||||
err = werr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *initProcess) startTime() (string, error) {
|
||||
return system.GetProcessStartTime(p.pid())
|
||||
}
|
||||
|
||||
func (p *initProcess) sendConfig() error {
|
||||
// send the config to the container's init process, we don't use JSON Encode
|
||||
// here because there might be a problem in JSON decoder in some cases, see:
|
||||
// https://github.com/docker/docker/issues/14203#issuecomment-174177790
|
||||
return utils.WriteJSON(p.parentPipe, p.config)
|
||||
}
|
||||
|
||||
func (p *initProcess) createNetworkInterfaces() error {
|
||||
for _, config := range p.config.Config.Networks {
|
||||
strategy, err := getStrategy(config.Type)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
n := &network{
|
||||
Network: *config,
|
||||
}
|
||||
if err := strategy.create(n, p.pid()); err != nil {
|
||||
return err
|
||||
}
|
||||
p.config.Networks = append(p.config.Networks, n)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *initProcess) signal(sig os.Signal) error {
|
||||
s, ok := sig.(syscall.Signal)
|
||||
if !ok {
|
||||
return errors.New("os: unsupported signal type")
|
||||
}
|
||||
return unix.Kill(p.pid(), s)
|
||||
}
|
||||
|
||||
func (p *initProcess) setExternalDescriptors(newFds []string) {
|
||||
p.fds = newFds
|
||||
}
|
||||
|
||||
func getPipeFds(pid int) ([]string, error) {
|
||||
fds := make([]string, 3)
|
||||
|
||||
dirPath := filepath.Join("/proc", strconv.Itoa(pid), "/fd")
|
||||
for i := 0; i < 3; i++ {
|
||||
// XXX: This breaks if the path is not a valid symlink (which can
|
||||
// happen in certain particularly unlucky mount namespace setups).
|
||||
f := filepath.Join(dirPath, strconv.Itoa(i))
|
||||
target, err := os.Readlink(f)
|
||||
if err != nil {
|
||||
// Ignore permission errors, for rootless containers and other
|
||||
// non-dumpable processes. if we can't get the fd for a particular
|
||||
// file, there's not much we can do.
|
||||
if os.IsPermission(err) {
|
||||
continue
|
||||
}
|
||||
return fds, err
|
||||
}
|
||||
fds[i] = target
|
||||
}
|
||||
return fds, nil
|
||||
}
|
||||
|
||||
// InitializeIO creates pipes for use with the process's stdio and returns the
|
||||
// opposite side for each. Do not use this if you want to have a pseudoterminal
|
||||
// set up for you by libcontainer (TODO: fix that too).
|
||||
// TODO: This is mostly unnecessary, and should be handled by clients.
|
||||
func (p *Process) InitializeIO(rootuid, rootgid int) (i *IO, err error) {
|
||||
var fds []uintptr
|
||||
i = &IO{}
|
||||
// cleanup in case of an error
|
||||
defer func() {
|
||||
if err != nil {
|
||||
for _, fd := range fds {
|
||||
unix.Close(int(fd))
|
||||
}
|
||||
}
|
||||
}()
|
||||
// STDIN
|
||||
r, w, err := os.Pipe()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fds = append(fds, r.Fd(), w.Fd())
|
||||
p.Stdin, i.Stdin = r, w
|
||||
// STDOUT
|
||||
if r, w, err = os.Pipe(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fds = append(fds, r.Fd(), w.Fd())
|
||||
p.Stdout, i.Stdout = w, r
|
||||
// STDERR
|
||||
if r, w, err = os.Pipe(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
fds = append(fds, r.Fd(), w.Fd())
|
||||
p.Stderr, i.Stderr = w, r
|
||||
// change ownership of the pipes incase we are in a user namespace
|
||||
for _, fd := range fds {
|
||||
if err := unix.Fchown(int(fd), rootuid, rootgid); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
122
vendor/github.com/opencontainers/runc/libcontainer/restored_process.go
generated
vendored
122
vendor/github.com/opencontainers/runc/libcontainer/restored_process.go
generated
vendored
@@ -1,122 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
)
|
||||
|
||||
func newRestoredProcess(pid int, fds []string) (*restoredProcess, error) {
|
||||
var (
|
||||
err error
|
||||
)
|
||||
proc, err := os.FindProcess(pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
started, err := system.GetProcessStartTime(pid)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &restoredProcess{
|
||||
proc: proc,
|
||||
processStartTime: started,
|
||||
fds: fds,
|
||||
}, nil
|
||||
}
|
||||
|
||||
type restoredProcess struct {
|
||||
proc *os.Process
|
||||
processStartTime string
|
||||
fds []string
|
||||
}
|
||||
|
||||
func (p *restoredProcess) start() error {
|
||||
return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError)
|
||||
}
|
||||
|
||||
func (p *restoredProcess) pid() int {
|
||||
return p.proc.Pid
|
||||
}
|
||||
|
||||
func (p *restoredProcess) terminate() error {
|
||||
err := p.proc.Kill()
|
||||
if _, werr := p.wait(); err == nil {
|
||||
err = werr
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *restoredProcess) wait() (*os.ProcessState, error) {
|
||||
// TODO: how do we wait on the actual process?
|
||||
// maybe use --exec-cmd in criu
|
||||
st, err := p.proc.Wait()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return st, nil
|
||||
}
|
||||
|
||||
func (p *restoredProcess) startTime() (string, error) {
|
||||
return p.processStartTime, nil
|
||||
}
|
||||
|
||||
func (p *restoredProcess) signal(s os.Signal) error {
|
||||
return p.proc.Signal(s)
|
||||
}
|
||||
|
||||
func (p *restoredProcess) externalDescriptors() []string {
|
||||
return p.fds
|
||||
}
|
||||
|
||||
func (p *restoredProcess) setExternalDescriptors(newFds []string) {
|
||||
p.fds = newFds
|
||||
}
|
||||
|
||||
// nonChildProcess represents a process where the calling process is not
|
||||
// the parent process. This process is created when a factory loads a container from
|
||||
// a persisted state.
|
||||
type nonChildProcess struct {
|
||||
processPid int
|
||||
processStartTime string
|
||||
fds []string
|
||||
}
|
||||
|
||||
func (p *nonChildProcess) start() error {
|
||||
return newGenericError(fmt.Errorf("restored process cannot be started"), SystemError)
|
||||
}
|
||||
|
||||
func (p *nonChildProcess) pid() int {
|
||||
return p.processPid
|
||||
}
|
||||
|
||||
func (p *nonChildProcess) terminate() error {
|
||||
return newGenericError(fmt.Errorf("restored process cannot be terminated"), SystemError)
|
||||
}
|
||||
|
||||
func (p *nonChildProcess) wait() (*os.ProcessState, error) {
|
||||
return nil, newGenericError(fmt.Errorf("restored process cannot be waited on"), SystemError)
|
||||
}
|
||||
|
||||
func (p *nonChildProcess) startTime() (string, error) {
|
||||
return p.processStartTime, nil
|
||||
}
|
||||
|
||||
func (p *nonChildProcess) signal(s os.Signal) error {
|
||||
proc, err := os.FindProcess(p.processPid)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return proc.Signal(s)
|
||||
}
|
||||
|
||||
func (p *nonChildProcess) externalDescriptors() []string {
|
||||
return p.fds
|
||||
}
|
||||
|
||||
func (p *nonChildProcess) setExternalDescriptors(newFds []string) {
|
||||
p.fds = newFds
|
||||
}
|
||||
812
vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go
generated
vendored
812
vendor/github.com/opencontainers/runc/libcontainer/rootfs_linux.go
generated
vendored
@@ -1,812 +0,0 @@
|
||||
// +build linux
|
||||
|
||||
package libcontainer
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/mount"
|
||||
"github.com/docker/docker/pkg/symlink"
|
||||
"github.com/mrunalp/fileutils"
|
||||
"github.com/opencontainers/runc/libcontainer/cgroups"
|
||||
"github.com/opencontainers/runc/libcontainer/configs"
|
||||
"github.com/opencontainers/runc/libcontainer/system"
|
||||
libcontainerUtils "github.com/opencontainers/runc/libcontainer/utils"
|
||||
"github.com/opencontainers/selinux/go-selinux/label"
|
||||
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
const defaultMountFlags = unix.MS_NOEXEC | unix.MS_NOSUID | unix.MS_NODEV
|
||||
|
||||
// needsSetupDev returns true if /dev needs to be set up.
|
||||
func needsSetupDev(config *configs.Config) bool {
|
||||
for _, m := range config.Mounts {
|
||||
if m.Device == "bind" && libcontainerUtils.CleanPath(m.Destination) == "/dev" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// prepareRootfs sets up the devices, mount points, and filesystems for use
|
||||
// inside a new mount namespace. It doesn't set anything as ro. You must call
|
||||
// finalizeRootfs after this function to finish setting up the rootfs.
|
||||
func prepareRootfs(pipe io.ReadWriter, config *configs.Config) (err error) {
|
||||
if err := prepareRoot(config); err != nil {
|
||||
return newSystemErrorWithCause(err, "preparing rootfs")
|
||||
}
|
||||
|
||||
setupDev := needsSetupDev(config)
|
||||
for _, m := range config.Mounts {
|
||||
for _, precmd := range m.PremountCmds {
|
||||
if err := mountCmd(precmd); err != nil {
|
||||
return newSystemErrorWithCause(err, "running premount command")
|
||||
}
|
||||
}
|
||||
|
||||
if err := mountToRootfs(m, config.Rootfs, config.MountLabel); err != nil {
|
||||
return newSystemErrorWithCausef(err, "mounting %q to rootfs %q at %q", m.Source, config.Rootfs, m.Destination)
|
||||
}
|
||||
|
||||
for _, postcmd := range m.PostmountCmds {
|
||||
if err := mountCmd(postcmd); err != nil {
|
||||
return newSystemErrorWithCause(err, "running postmount command")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if setupDev {
|
||||
if err := createDevices(config); err != nil {
|
||||
return newSystemErrorWithCause(err, "creating device nodes")
|
||||
}
|
||||
if err := setupPtmx(config); err != nil {
|
||||
return newSystemErrorWithCause(err, "setting up ptmx")
|
||||
}
|
||||
if err := setupDevSymlinks(config.Rootfs); err != nil {
|
||||
return newSystemErrorWithCause(err, "setting up /dev symlinks")
|
||||
}
|
||||
}
|
||||
|
||||
// Signal the parent to run the pre-start hooks.
|
||||
// The hooks are run after the mounts are setup, but before we switch to the new
|
||||
// root, so that the old root is still available in the hooks for any mount
|
||||
// manipulations.
|
||||
if err := syncParentHooks(pipe); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// The reason these operations are done here rather than in finalizeRootfs
|
||||
// is because the console-handling code gets quite sticky if we have to set
|
||||
// up the console before doing the pivot_root(2). This is because the
|
||||
// Console API has to also work with the ExecIn case, which means that the
|
||||
// API must be able to deal with being inside as well as outside the
|
||||
// container. It's just cleaner to do this here (at the expense of the
|
||||
// operation not being perfectly split).
|
||||
|
||||
if err := unix.Chdir(config.Rootfs); err != nil {
|
||||
return newSystemErrorWithCausef(err, "changing dir to %q", config.Rootfs)
|
||||
}
|
||||
|
||||
if config.NoPivotRoot {
|
||||
err = msMoveRoot(config.Rootfs)
|
||||
} else {
|
||||
err = pivotRoot(config.Rootfs)
|
||||
}
|
||||
if err != nil {
|
||||
return newSystemErrorWithCause(err, "jailing process inside rootfs")
|
||||
}
|
||||
|
||||
if setupDev {
|
||||
if err := reOpenDevNull(); err != nil {
|
||||
return newSystemErrorWithCause(err, "reopening /dev/null inside container")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// finalizeRootfs sets anything to ro if necessary. You must call
|
||||
// prepareRootfs first.
|
||||
func finalizeRootfs(config *configs.Config) (err error) {
|
||||
// remount dev as ro if specified
|
||||
for _, m := range config.Mounts {
|
||||
if libcontainerUtils.CleanPath(m.Destination) == "/dev" {
|
||||
if m.Flags&unix.MS_RDONLY == unix.MS_RDONLY {
|
||||
if err := remountReadonly(m); err != nil {
|
||||
return newSystemErrorWithCausef(err, "remounting %q as readonly", m.Destination)
|
||||
}
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// set rootfs ( / ) as readonly
|
||||
if config.Readonlyfs {
|
||||
if err := setReadonly(); err != nil {
|
||||
return newSystemErrorWithCause(err, "setting rootfs as readonly")
|
||||
}
|
||||
}
|
||||
|
||||
unix.Umask(0022)
|
||||
return nil
|
||||
}
|
||||
|
||||
func mountCmd(cmd configs.Command) error {
|
||||
command := exec.Command(cmd.Path, cmd.Args[:]...)
|
||||
command.Env = cmd.Env
|
||||
command.Dir = cmd.Dir
|
||||
if out, err := command.CombinedOutput(); err != nil {
|
||||
return fmt.Errorf("%#v failed: %s: %v", cmd, string(out), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func mountToRootfs(m *configs.Mount, rootfs, mountLabel string) error {
|
||||
var (
|
||||
dest = m.Destination
|
||||
)
|
||||
if !strings.HasPrefix(dest, rootfs) {
|
||||
dest = filepath.Join(rootfs, dest)
|
||||
}
|
||||
|
||||
switch m.Device {
|
||||
case "proc", "sysfs":
|
||||
if err := os.MkdirAll(dest, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
// Selinux kernels do not support labeling of /proc or /sys
|
||||
return mountPropagate(m, rootfs, "")
|
||||
case "mqueue":
|
||||
if err := os.MkdirAll(dest, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mountPropagate(m, rootfs, mountLabel); err != nil {
|
||||
// older kernels do not support labeling of /dev/mqueue
|
||||
if err := mountPropagate(m, rootfs, ""); err != nil {
|
||||
return err
|
||||
}
|
||||
return label.SetFileLabel(dest, mountLabel)
|
||||
}
|
||||
return nil
|
||||
case "tmpfs":
|
||||
copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP
|
||||
tmpDir := ""
|
||||
stat, err := os.Stat(dest)
|
||||
if err != nil {
|
||||
if err := os.MkdirAll(dest, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if copyUp {
|
||||
tmpDir, err = ioutil.TempDir("/tmp", "runctmpdir")
|
||||
if err != nil {
|
||||
return newSystemErrorWithCause(err, "tmpcopyup: failed to create tmpdir")
|
||||
}
|
||||
defer os.RemoveAll(tmpDir)
|
||||
m.Destination = tmpDir
|
||||
}
|
||||
if err := mountPropagate(m, rootfs, mountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
if copyUp {
|
||||
if err := fileutils.CopyDirectory(dest, tmpDir); err != nil {
|
||||
errMsg := fmt.Errorf("tmpcopyup: failed to copy %s to %s: %v", dest, tmpDir, err)
|
||||
if err1 := unix.Unmount(tmpDir, unix.MNT_DETACH); err1 != nil {
|
||||
return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg)
|
||||
}
|
||||
return errMsg
|
||||
}
|
||||
if err := unix.Mount(tmpDir, dest, "", unix.MS_MOVE, ""); err != nil {
|
||||
errMsg := fmt.Errorf("tmpcopyup: failed to move mount %s to %s: %v", tmpDir, dest, err)
|
||||
if err1 := unix.Unmount(tmpDir, unix.MNT_DETACH); err1 != nil {
|
||||
return newSystemErrorWithCausef(err1, "tmpcopyup: %v: failed to unmount", errMsg)
|
||||
}
|
||||
return errMsg
|
||||
}
|
||||
}
|
||||
if stat != nil {
|
||||
if err = os.Chmod(dest, stat.Mode()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case "bind":
|
||||
stat, err := os.Stat(m.Source)
|
||||
if err != nil {
|
||||
// error out if the source of a bind mount does not exist as we will be
|
||||
// unable to bind anything to it.
|
||||
return err
|
||||
}
|
||||
// ensure that the destination of the bind mount is resolved of symlinks at mount time because
|
||||
// any previous mounts can invalidate the next mount's destination.
|
||||
// this can happen when a user specifies mounts within other mounts to cause breakouts or other
|
||||
// evil stuff to try to escape the container's rootfs.
|
||||
if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkMountDestination(rootfs, dest); err != nil {
|
||||
return err
|
||||
}
|
||||
// update the mount with the correct dest after symlinks are resolved.
|
||||
m.Destination = dest
|
||||
if err := createIfNotExists(dest, stat.IsDir()); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := mountPropagate(m, rootfs, mountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
// bind mount won't change mount options, we need remount to make mount options effective.
|
||||
// first check that we have non-default options required before attempting a remount
|
||||
if m.Flags&^(unix.MS_REC|unix.MS_REMOUNT|unix.MS_BIND) != 0 {
|
||||
// only remount if unique mount options are set
|
||||
if err := remount(m, rootfs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if m.Relabel != "" {
|
||||
if err := label.Validate(m.Relabel); err != nil {
|
||||
return err
|
||||
}
|
||||
shared := label.IsShared(m.Relabel)
|
||||
if err := label.Relabel(m.Source, mountLabel, shared); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
case "cgroup":
|
||||
binds, err := getCgroupMounts(m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var merged []string
|
||||
for _, b := range binds {
|
||||
ss := filepath.Base(b.Destination)
|
||||
if strings.Contains(ss, ",") {
|
||||
merged = append(merged, ss)
|
||||
}
|
||||
}
|
||||
tmpfs := &configs.Mount{
|
||||
Source: "tmpfs",
|
||||
Device: "tmpfs",
|
||||
Destination: m.Destination,
|
||||
Flags: defaultMountFlags,
|
||||
Data: "mode=755",
|
||||
PropagationFlags: m.PropagationFlags,
|
||||
}
|
||||
if err := mountToRootfs(tmpfs, rootfs, mountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
for _, b := range binds {
|
||||
if err := mountToRootfs(b, rootfs, mountLabel); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
for _, mc := range merged {
|
||||
for _, ss := range strings.Split(mc, ",") {
|
||||
// symlink(2) is very dumb, it will just shove the path into
|
||||
// the link and doesn't do any checks or relative path
|
||||
// conversion. Also, don't error out if the cgroup already exists.
|
||||
if err := os.Symlink(mc, filepath.Join(rootfs, m.Destination, ss)); err != nil && !os.IsExist(err) {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
if m.Flags&unix.MS_RDONLY != 0 {
|
||||
// remount cgroup root as readonly
|
||||
mcgrouproot := &configs.Mount{
|
||||
Source: m.Destination,
|
||||
Device: "bind",
|
||||
Destination: m.Destination,
|
||||
Flags: defaultMountFlags | unix.MS_RDONLY | unix.MS_BIND,
|
||||
}
|
||||
if err := remount(mcgrouproot, rootfs); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
// ensure that the destination of the mount is resolved of symlinks at mount time because
|
||||
// any previous mounts can invalidate the next mount's destination.
|
||||
// this can happen when a user specifies mounts within other mounts to cause breakouts or other
|
||||
// evil stuff to try to escape the container's rootfs.
|
||||
var err error
|
||||
if dest, err = symlink.FollowSymlinkInScope(dest, rootfs); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkMountDestination(rootfs, dest); err != nil {
|
||||
return err
|
||||
}
|
||||
// update the mount with the correct dest after symlinks are resolved.
|
||||
m.Destination = dest
|
||||
if err := os.MkdirAll(dest, 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
return mountPropagate(m, rootfs, mountLabel)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getCgroupMounts(m *configs.Mount) ([]*configs.Mount, error) {
|
||||
mounts, err := cgroups.GetCgroupMounts(false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cgroupPaths, err := cgroups.ParseCgroupFile("/proc/self/cgroup")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var binds []*configs.Mount
|
||||
|
||||
for _, mm := range mounts {
|
||||
dir, err := mm.GetOwnCgroup(cgroupPaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
relDir, err := filepath.Rel(mm.Root, dir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
binds = append(binds, &configs.Mount{
|
||||
Device: "bind",
|
||||
Source: filepath.Join(mm.Mountpoint, relDir),
|
||||
Destination: filepath.Join(m.Destination, filepath.Base(mm.Mountpoint)),
|
||||
Flags: unix.MS_BIND | unix.MS_REC | m.Flags,
|
||||
PropagationFlags: m.PropagationFlags,
|
||||
})
|
||||
}
|
||||
|
||||
return binds, nil
|
||||
}
|
||||
|
||||
// checkMountDestination checks to ensure that the mount destination is not over the top of /proc.
|
||||
// dest is required to be an abs path and have any symlinks resolved before calling this function.
|
||||
func checkMountDestination(rootfs, dest string) error {
|
||||
invalidDestinations := []string{
|
||||
"/proc",
|
||||
}
|
||||
// White list, it should be sub directories of invalid destinations
|
||||
validDestinations := []string{
|
||||
// These entries can be bind mounted by files emulated by fuse,
|
||||
// so commands like top, free displays stats in container.
|
||||
"/proc/cpuinfo",
|
||||
"/proc/diskstats",
|
||||
"/proc/meminfo",
|
||||
"/proc/stat",
|
||||
"/proc/swaps",
|
||||
"/proc/uptime",
|
||||
"/proc/net/dev",
|
||||
}
|
||||
for _, valid := range validDestinations {
|
||||
path, err := filepath.Rel(filepath.Join(rootfs, valid), dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if path == "." {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
for _, invalid := range invalidDestinations {
|
||||
path, err := filepath.Rel(filepath.Join(rootfs, invalid), dest)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if path == "." || !strings.HasPrefix(path, "..") {
|
||||
return fmt.Errorf("%q cannot be mounted because it is located inside %q", dest, invalid)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setupDevSymlinks(rootfs string) error {
	var links = [][2]string{
		{"/proc/self/fd", "/dev/fd"},
		{"/proc/self/fd/0", "/dev/stdin"},
		{"/proc/self/fd/1", "/dev/stdout"},
		{"/proc/self/fd/2", "/dev/stderr"},
	}
	// kcore support can be toggled with CONFIG_PROC_KCORE; only create a symlink
	// in /dev if it exists in /proc.
	if _, err := os.Stat("/proc/kcore"); err == nil {
		links = append(links, [2]string{"/proc/kcore", "/dev/core"})
	}
	for _, link := range links {
		var (
			src = link[0]
			dst = filepath.Join(rootfs, link[1])
		)
		if err := os.Symlink(src, dst); err != nil && !os.IsExist(err) {
			return fmt.Errorf("symlink %s %s %s", src, dst, err)
		}
	}
	return nil
}

// If stdin, stdout, and/or stderr are pointing to `/dev/null` in the parent's rootfs
// this method will make them point to `/dev/null` in this container's rootfs. This
// needs to be called after we chroot/pivot into the container's rootfs so that any
// symlinks are resolved locally.
func reOpenDevNull() error {
	var stat, devNullStat unix.Stat_t
	file, err := os.OpenFile("/dev/null", os.O_RDWR, 0)
	if err != nil {
		return fmt.Errorf("Failed to open /dev/null - %s", err)
	}
	defer file.Close()
	if err := unix.Fstat(int(file.Fd()), &devNullStat); err != nil {
		return err
	}
	for fd := 0; fd < 3; fd++ {
		if err := unix.Fstat(fd, &stat); err != nil {
			return err
		}
		if stat.Rdev == devNullStat.Rdev {
			// Close and re-open the fd.
			if err := unix.Dup3(int(file.Fd()), fd, 0); err != nil {
				return err
			}
		}
	}
	return nil
}

// Create the device nodes in the container.
func createDevices(config *configs.Config) error {
	useBindMount := system.RunningInUserNS() || config.Namespaces.Contains(configs.NEWUSER)
	oldMask := unix.Umask(0000)
	for _, node := range config.Devices {
		// containers running in a user namespace are not allowed to mknod
		// devices so we can just bind mount it from the host.
		if err := createDeviceNode(config.Rootfs, node, useBindMount); err != nil {
			unix.Umask(oldMask)
			return err
		}
	}
	unix.Umask(oldMask)
	return nil
}

func bindMountDeviceNode(dest string, node *configs.Device) error {
	f, err := os.Create(dest)
	if err != nil && !os.IsExist(err) {
		return err
	}
	if f != nil {
		f.Close()
	}
	return unix.Mount(node.Path, dest, "bind", unix.MS_BIND, "")
}

// Creates the device node in the rootfs of the container.
func createDeviceNode(rootfs string, node *configs.Device, bind bool) error {
	dest := filepath.Join(rootfs, node.Path)
	if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
		return err
	}

	if bind {
		return bindMountDeviceNode(dest, node)
	}
	if err := mknodDevice(dest, node); err != nil {
		if os.IsExist(err) {
			return nil
		} else if os.IsPermission(err) {
			return bindMountDeviceNode(dest, node)
		}
		return err
	}
	return nil
}

func mknodDevice(dest string, node *configs.Device) error {
	fileMode := node.FileMode
	switch node.Type {
	case 'c', 'u':
		fileMode |= unix.S_IFCHR
	case 'b':
		fileMode |= unix.S_IFBLK
	case 'p':
		fileMode |= unix.S_IFIFO
	default:
		return fmt.Errorf("%c is not a valid device type for device %s", node.Type, node.Path)
	}
	if err := unix.Mknod(dest, uint32(fileMode), node.Mkdev()); err != nil {
		return err
	}
	return unix.Chown(dest, int(node.Uid), int(node.Gid))
}
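
For orientation, this is roughly what mknodDevice boils down to for a character device such as /dev/null (major 1, minor 3). A standalone sketch using golang.org/x/sys/unix directly, not the vendored code; the destination path is hypothetical and actually creating the node requires root:

package main

import (
	"log"
	"os"
	"path/filepath"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical destination inside an unpacked rootfs.
	dest := "/tmp/rootfs/dev/null"
	if err := os.MkdirAll(filepath.Dir(dest), 0755); err != nil {
		log.Fatal(err)
	}

	// Device types 'c' and 'u' translate to S_IFCHR; 0666 are the permission bits.
	mode := uint32(unix.S_IFCHR | 0666)

	// Major 1, minor 3 is the conventional numbering for /dev/null.
	dev := int(unix.Mkdev(1, 3))

	if err := unix.Mknod(dest, mode, dev); err != nil {
		// Inside a user namespace this fails with EPERM, which is why the
		// vendored code falls back to bindMountDeviceNode in that case.
		log.Fatalf("mknod %s: %v", dest, err)
	}
}
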
func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info {
	for _, m := range mountinfo {
		if m.Mountpoint == dir {
			return m
		}
	}
	return nil
}

// Get the parent mount point of the directory passed in as argument. Also return
// its optional fields.
func getParentMount(rootfs string) (string, string, error) {
	var path string

	mountinfos, err := mount.GetMounts()
	if err != nil {
		return "", "", err
	}

	mountinfo := getMountInfo(mountinfos, rootfs)
	if mountinfo != nil {
		return rootfs, mountinfo.Optional, nil
	}

	path = rootfs
	for {
		path = filepath.Dir(path)

		mountinfo = getMountInfo(mountinfos, path)
		if mountinfo != nil {
			return path, mountinfo.Optional, nil
		}

		if path == "/" {
			break
		}
	}

	// If we are here, we did not find the parent mount. Something is wrong.
	return "", "", fmt.Errorf("Could not find parent mount of %s", rootfs)
}

// Make parent mount private if it was shared
func rootfsParentMountPrivate(rootfs string) error {
	sharedMount := false

	parentMount, optionalOpts, err := getParentMount(rootfs)
	if err != nil {
		return err
	}

	optsSplit := strings.Split(optionalOpts, " ")
	for _, opt := range optsSplit {
		if strings.HasPrefix(opt, "shared:") {
			sharedMount = true
			break
		}
	}

	// Make the parent mount PRIVATE if it was shared. This is needed for two
	// reasons. First, pivot_root() will fail if the parent mount is shared.
	// Second, when we bind mount the rootfs it would propagate to the parent
	// namespace, and we don't want that to happen.
	if sharedMount {
		return unix.Mount("", parentMount, "", unix.MS_PRIVATE, "")
	}

	return nil
}

func prepareRoot(config *configs.Config) error {
	flag := unix.MS_SLAVE | unix.MS_REC
	if config.RootPropagation != 0 {
		flag = config.RootPropagation
	}
	if err := unix.Mount("", "/", "", uintptr(flag), ""); err != nil {
		return err
	}

	// Make the parent mount private so that the following bind mount does not
	// propagate to other namespaces. This also helps the kernel check in
	// pivot_root pass (IS_SHARED(new_mnt->mnt_parent)).
	if err := rootfsParentMountPrivate(config.Rootfs); err != nil {
		return err
	}

	return unix.Mount(config.Rootfs, config.Rootfs, "bind", unix.MS_BIND|unix.MS_REC, "")
}

func setReadonly() error {
	return unix.Mount("/", "/", "bind", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_REC, "")
}

func setupPtmx(config *configs.Config) error {
	ptmx := filepath.Join(config.Rootfs, "dev/ptmx")
	if err := os.Remove(ptmx); err != nil && !os.IsNotExist(err) {
		return err
	}
	if err := os.Symlink("pts/ptmx", ptmx); err != nil {
		return fmt.Errorf("symlink dev ptmx %s", err)
	}
	return nil
}
// pivotRoot will call pivot_root such that rootfs becomes the new root
// filesystem, and everything else is cleaned up.
func pivotRoot(rootfs string) error {
	// While the documentation may claim otherwise, pivot_root(".", ".") is
	// actually valid. What this results in is / being the new root but
	// /proc/self/cwd being the old root. Since we can play around with the cwd
	// with pivot_root this allows us to pivot without creating directories in
	// the rootfs. Shout-outs to the LXC developers for giving us this idea.

	oldroot, err := unix.Open("/", unix.O_DIRECTORY|unix.O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer unix.Close(oldroot)

	newroot, err := unix.Open(rootfs, unix.O_DIRECTORY|unix.O_RDONLY, 0)
	if err != nil {
		return err
	}
	defer unix.Close(newroot)

	// Change to the new root so that the pivot_root actually acts on it.
	if err := unix.Fchdir(newroot); err != nil {
		return err
	}

	if err := unix.PivotRoot(".", "."); err != nil {
		return fmt.Errorf("pivot_root %s", err)
	}

	// Currently our "." is oldroot (according to the current kernel code).
	// However, purely for safety, we will fchdir(oldroot) since there isn't
	// really any guarantee from the kernel what /proc/self/cwd will be after a
	// pivot_root(2).

	if err := unix.Fchdir(oldroot); err != nil {
		return err
	}

	// Make oldroot rprivate to make sure our unmounts don't propagate to the
	// host (and thus bork the machine).
	if err := unix.Mount("", ".", "", unix.MS_PRIVATE|unix.MS_REC, ""); err != nil {
		return err
	}
	// Perform the unmount. MNT_DETACH allows us to unmount /proc/self/cwd.
	if err := unix.Unmount(".", unix.MNT_DETACH); err != nil {
		return err
	}

	// Switch back to our shiny new root.
	if err := unix.Chdir("/"); err != nil {
		return fmt.Errorf("chdir / %s", err)
	}
	return nil
}

func msMoveRoot(rootfs string) error {
	if err := unix.Mount(rootfs, "/", "", unix.MS_MOVE, ""); err != nil {
		return err
	}
	if err := unix.Chroot("."); err != nil {
		return err
	}
	return unix.Chdir("/")
}

// createIfNotExists creates a file or a directory only if it does not already exist.
func createIfNotExists(path string, isDir bool) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			if isDir {
				return os.MkdirAll(path, 0755)
			}
			if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
				return err
			}
			f, err := os.OpenFile(path, os.O_CREATE, 0755)
			if err != nil {
				return err
			}
			f.Close()
		}
	}
	return nil
}

// readonlyPath will make a path read-only.
func readonlyPath(path string) error {
	if err := unix.Mount(path, path, "", unix.MS_BIND|unix.MS_REC, ""); err != nil {
		if os.IsNotExist(err) {
			return nil
		}
		return err
	}
	return unix.Mount(path, path, "", unix.MS_BIND|unix.MS_REMOUNT|unix.MS_RDONLY|unix.MS_REC, "")
}
// remountReadonly will remount an existing mount point and ensure that it is read-only.
func remountReadonly(m *configs.Mount) error {
	var (
		dest  = m.Destination
		flags = m.Flags
	)
	for i := 0; i < 5; i++ {
		if err := unix.Mount("", dest, "", uintptr(flags|unix.MS_REMOUNT|unix.MS_RDONLY), ""); err != nil {
			switch err {
			case unix.EBUSY:
				time.Sleep(100 * time.Millisecond)
				continue
			default:
				return err
			}
		}
		return nil
	}
	return fmt.Errorf("unable to mount %s as readonly max retries reached", dest)
}

// maskPath masks the top of the specified path inside a container to avoid
// security issues from processes reading information from non-namespace-aware
// mounts (e.g. /proc/kcore).
// For files, maskPath bind mounts /dev/null over the top of the specified path.
// For directories, maskPath mounts read-only tmpfs over the top of the specified path.
func maskPath(path string) error {
	if err := unix.Mount("/dev/null", path, "", unix.MS_BIND, ""); err != nil && !os.IsNotExist(err) {
		if err == unix.ENOTDIR {
			return unix.Mount("tmpfs", path, "tmpfs", unix.MS_RDONLY, "")
		}
		return err
	}
	return nil
}

// writeSystemProperty writes the value to a path under /proc/sys as determined from the key.
// For example, net.ipv4.ip_forward is translated to /proc/sys/net/ipv4/ip_forward.
func writeSystemProperty(key, value string) error {
	keyPath := strings.Replace(key, ".", "/", -1)
	return ioutil.WriteFile(path.Join("/proc/sys", keyPath), []byte(value), 0644)
}
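
To make the key-to-path translation above concrete, here is a small standalone sketch (not from the vendored file) that derives the target path the same way; it only prints the mapping and does not write to /proc/sys:

package main

import (
	"fmt"
	"path"
	"strings"
)

// sysctlPath mirrors the translation done by writeSystemProperty:
// "net.ipv4.ip_forward" -> "/proc/sys/net/ipv4/ip_forward".
func sysctlPath(key string) string {
	return path.Join("/proc/sys", strings.Replace(key, ".", "/", -1))
}

func main() {
	for _, key := range []string{"net.ipv4.ip_forward", "kernel.domainname"} {
		fmt.Printf("%s -> %s\n", key, sysctlPath(key))
	}
	// Writing the value to the derived path with ioutil.WriteFile, as the
	// vendored function does, is what actually applies the sysctl.
}
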
func remount(m *configs.Mount, rootfs string) error {
	var (
		dest = m.Destination
	)
	if !strings.HasPrefix(dest, rootfs) {
		dest = filepath.Join(rootfs, dest)
	}
	if err := unix.Mount(m.Source, dest, m.Device, uintptr(m.Flags|unix.MS_REMOUNT), ""); err != nil {
		return err
	}
	return nil
}

// Do the mount operation followed by additional mounts required to take care
// of propagation flags.
func mountPropagate(m *configs.Mount, rootfs string, mountLabel string) error {
	var (
		dest  = m.Destination
		data  = label.FormatMountLabel(m.Data, mountLabel)
		flags = m.Flags
	)
	if libcontainerUtils.CleanPath(dest) == "/dev" {
		flags &= ^unix.MS_RDONLY
	}

	copyUp := m.Extensions&configs.EXT_COPYUP == configs.EXT_COPYUP
	if !(copyUp || strings.HasPrefix(dest, rootfs)) {
		dest = filepath.Join(rootfs, dest)
	}

	if err := unix.Mount(m.Source, dest, m.Device, uintptr(flags), data); err != nil {
		return err
	}

	for _, pflag := range m.PropagationFlags {
		if err := unix.Mount("", dest, "", uintptr(pflag), ""); err != nil {
			return err
		}
	}
	return nil
}
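
The two-step pattern in mountPropagate (the mount itself, then extra mount calls that only change propagation) can be seen in isolation in this sketch. It bind mounts a hypothetical directory and then marks it rshared; the paths are assumptions, and running it requires root inside a Linux mount namespace:

package main

import (
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	// Hypothetical source and destination paths.
	src, dest := "/tmp/data", "/tmp/rootfs/data"

	// Step 1: the mount operation itself (a recursive bind mount here).
	if err := unix.Mount(src, dest, "bind", unix.MS_BIND|unix.MS_REC, ""); err != nil {
		log.Fatalf("bind mount: %v", err)
	}

	// Step 2: propagation changes are separate mount calls with an empty
	// source, just like the PropagationFlags loop above.
	for _, pflag := range []uintptr{unix.MS_SHARED | unix.MS_REC} {
		if err := unix.Mount("", dest, "", pflag, ""); err != nil {
			log.Fatalf("set propagation: %v", err)
		}
	}
}
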
11
vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go
generated
vendored
11
vendor/github.com/opencontainers/runc/libcontainer/setgroups_linux.go
generated
vendored
@@ -1,11 +0,0 @@
// +build linux,go1.5

package libcontainer

import "syscall"

// Set the GidMappingsEnableSetgroups member to true, so the process's
// setgroups proc entry won't be set to 'deny' if GidMappings are set
func enableSetgroups(sys *syscall.SysProcAttr) {
	sys.GidMappingsEnableSetgroups = true
}
63
vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go
generated
vendored
63
vendor/github.com/opencontainers/runc/libcontainer/setns_init_linux.go
generated
vendored
@@ -1,63 +0,0 @@
// +build linux

package libcontainer

import (
	"fmt"
	"os"

	"github.com/opencontainers/runc/libcontainer/apparmor"
	"github.com/opencontainers/runc/libcontainer/keys"
	"github.com/opencontainers/runc/libcontainer/seccomp"
	"github.com/opencontainers/runc/libcontainer/system"
	"github.com/opencontainers/selinux/go-selinux/label"
)

// linuxSetnsInit performs the container's initialization for running a new process
// inside an existing container.
type linuxSetnsInit struct {
	pipe          *os.File
	consoleSocket *os.File
	config        *initConfig
}

func (l *linuxSetnsInit) getSessionRingName() string {
	return fmt.Sprintf("_ses.%s", l.config.ContainerId)
}

func (l *linuxSetnsInit) Init() error {
	if !l.config.Config.NoNewKeyring {
		// do not inherit the parent's session keyring
		if _, err := keys.JoinSessionKeyring(l.getSessionRingName()); err != nil {
			return err
		}
	}
	if l.config.CreateConsole {
		if err := setupConsole(l.consoleSocket, l.config, false); err != nil {
			return err
		}
		if err := system.Setctty(); err != nil {
			return err
		}
	}
	if l.config.NoNewPrivileges {
		if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
			return err
		}
	}
	if l.config.Config.Seccomp != nil {
		if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
			return err
		}
	}
	if err := finalizeNamespace(l.config); err != nil {
		return err
	}
	if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
		return err
	}
	if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil {
		return err
	}
	return system.Execv(l.config.Args[0], l.config.Args[0:], os.Environ())
}
192
vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go
generated
vendored
192
vendor/github.com/opencontainers/runc/libcontainer/standard_init_linux.go
generated
vendored
@@ -1,192 +0,0 @@
// +build linux

package libcontainer

import (
	"fmt"
	"os"
	"os/exec"
	"syscall" // only for Exec

	"github.com/opencontainers/runc/libcontainer/apparmor"
	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/runc/libcontainer/keys"
	"github.com/opencontainers/runc/libcontainer/seccomp"
	"github.com/opencontainers/runc/libcontainer/system"
	"github.com/opencontainers/selinux/go-selinux/label"

	"golang.org/x/sys/unix"
)

type linuxStandardInit struct {
	pipe          *os.File
	consoleSocket *os.File
	parentPid     int
	stateDirFD    int
	config        *initConfig
}

func (l *linuxStandardInit) getSessionRingParams() (string, uint32, uint32) {
	var newperms uint32

	if l.config.Config.Namespaces.Contains(configs.NEWUSER) {
		// with user ns we need 'other' search permissions
		newperms = 0x8
	} else {
		// without user ns we need 'UID' search permissions
		newperms = 0x80000
	}

	// create a unique per session container name that we can
	// join in setns; however, other containers can also join it
	return fmt.Sprintf("_ses.%s", l.config.ContainerId), 0xffffffff, newperms
}

// PR_SET_NO_NEW_PRIVS isn't exposed in Go, so we define it ourselves by copying
// the value from the kernel.
const PR_SET_NO_NEW_PRIVS = 0x26

func (l *linuxStandardInit) Init() error {
	if !l.config.Config.NoNewKeyring {
		ringname, keepperms, newperms := l.getSessionRingParams()

		// do not inherit the parent's session keyring
		sessKeyId, err := keys.JoinSessionKeyring(ringname)
		if err != nil {
			return err
		}
		// make session keyring searchable
		if err := keys.ModKeyringPerm(sessKeyId, keepperms, newperms); err != nil {
			return err
		}
	}

	if err := setupNetwork(l.config); err != nil {
		return err
	}
	if err := setupRoute(l.config.Config); err != nil {
		return err
	}

	label.Init()

	// prepareRootfs() can be executed only for a new mount namespace.
	if l.config.Config.Namespaces.Contains(configs.NEWNS) {
		if err := prepareRootfs(l.pipe, l.config.Config); err != nil {
			return err
		}
	}

	// Set up the console. This has to be done *before* we finalize the rootfs,
	// but *after* we've given the user the chance to set up all of the mounts
	// they wanted.
	if l.config.CreateConsole {
		if err := setupConsole(l.consoleSocket, l.config, true); err != nil {
			return err
		}
		if err := system.Setctty(); err != nil {
			return err
		}
	}

	// Finish the rootfs setup.
	if l.config.Config.Namespaces.Contains(configs.NEWNS) {
		if err := finalizeRootfs(l.config.Config); err != nil {
			return err
		}
	}

	if hostname := l.config.Config.Hostname; hostname != "" {
		if err := unix.Sethostname([]byte(hostname)); err != nil {
			return err
		}
	}
	if err := apparmor.ApplyProfile(l.config.AppArmorProfile); err != nil {
		return err
	}
	if err := label.SetProcessLabel(l.config.ProcessLabel); err != nil {
		return err
	}

	for key, value := range l.config.Config.Sysctl {
		if err := writeSystemProperty(key, value); err != nil {
			return err
		}
	}
	for _, path := range l.config.Config.ReadonlyPaths {
		if err := readonlyPath(path); err != nil {
			return err
		}
	}
	for _, path := range l.config.Config.MaskPaths {
		if err := maskPath(path); err != nil {
			return err
		}
	}
	pdeath, err := system.GetParentDeathSignal()
	if err != nil {
		return err
	}
	if l.config.NoNewPrivileges {
		if err := system.Prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); err != nil {
			return err
		}
	}
	// Tell our parent that we're ready to Execv. This must be done before the
	// Seccomp rules have been applied, because we need to be able to read and
	// write to a socket.
	if err := syncParentReady(l.pipe); err != nil {
		return err
	}
	// Without NoNewPrivileges seccomp is a privileged operation, so we need to
	// do this before dropping capabilities; otherwise do it as late as possible
	// just before execve so as few syscalls take place after it as possible.
	if l.config.Config.Seccomp != nil && !l.config.NoNewPrivileges {
		if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
			return err
		}
	}
	if err := finalizeNamespace(l.config); err != nil {
		return err
	}
	// finalizeNamespace can change user/group which clears the parent death
	// signal, so we restore it here.
	if err := pdeath.Restore(); err != nil {
		return err
	}
	// compare the parent from the initial start of the init process and make sure that it did not change.
	// if the parent changed, that means it died and we were reparented to something else, so we should
	// just kill ourselves and not cause problems for someone else.
	if unix.Getppid() != l.parentPid {
		return unix.Kill(unix.Getpid(), unix.SIGKILL)
	}
	// check for the arg before waiting to make sure it exists and it is returned
	// as a create time error.
	name, err := exec.LookPath(l.config.Args[0])
	if err != nil {
		return err
	}
	// close the pipe to signal that we have completed our init.
	l.pipe.Close()
	// wait for the fifo to be opened on the other side before
	// exec'ing the user's process.
	fd, err := unix.Openat(l.stateDirFD, execFifoFilename, os.O_WRONLY|unix.O_CLOEXEC, 0)
	if err != nil {
		return newSystemErrorWithCause(err, "openat exec fifo")
	}
	if _, err := unix.Write(fd, []byte("0")); err != nil {
		return newSystemErrorWithCause(err, "write 0 exec fifo")
	}
	if l.config.Config.Seccomp != nil && l.config.NoNewPrivileges {
		if err := seccomp.InitSeccomp(l.config.Config.Seccomp); err != nil {
			return newSystemErrorWithCause(err, "init seccomp")
		}
	}
	// close the statedir fd before exec because the kernel resets dumpable in the wrong order
	// https://github.com/torvalds/linux/blob/v4.9/fs/exec.c#L1290-L1318
	unix.Close(l.stateDirFD)
	if err := syscall.Exec(name, l.config.Args[0:], os.Environ()); err != nil {
		return newSystemErrorWithCause(err, "exec user process")
	}
	return nil
}
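
The exec fifo opened near the end of Init relies on a property of named pipes: opening one end blocks until the other end is opened, which is what delays the final exec until the runtime's "start" step. A toy standalone sketch of that handshake follows; it keeps both ends in one process (which is not how runc arranges it) and uses a temporary fifo path of its own:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	"golang.org/x/sys/unix"
)

func main() {
	dir, err := os.MkdirTemp("", "fifo-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)
	fifo := filepath.Join(dir, "exec.fifo")

	if err := unix.Mkfifo(fifo, 0622); err != nil {
		panic(err)
	}

	done := make(chan struct{})
	// "Container init" side: Open blocks until a reader appears.
	go func() {
		defer close(done)
		fd, err := unix.Open(fifo, unix.O_WRONLY|unix.O_CLOEXEC, 0)
		if err != nil {
			panic(err)
		}
		defer unix.Close(fd)
		unix.Write(fd, []byte("0")) // the same single byte the vendored code writes
		fmt.Println("init side unblocked; this is where the exec would happen")
	}()

	time.Sleep(500 * time.Millisecond) // pretend "start" has not been invoked yet

	// "start" side: opening the read end releases the blocked writer.
	data, err := os.ReadFile(fifo)
	if err != nil {
		panic(err)
	}
	<-done
	fmt.Printf("start side read %q\n", data)
}
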
248
vendor/github.com/opencontainers/runc/libcontainer/state_linux.go
generated
vendored
248
vendor/github.com/opencontainers/runc/libcontainer/state_linux.go
generated
vendored
@@ -1,248 +0,0 @@
// +build linux

package libcontainer

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/Sirupsen/logrus"
	"github.com/opencontainers/runc/libcontainer/configs"
	"github.com/opencontainers/runc/libcontainer/utils"

	"golang.org/x/sys/unix"
)

func newStateTransitionError(from, to containerState) error {
	return &stateTransitionError{
		From: from.status().String(),
		To:   to.status().String(),
	}
}

// stateTransitionError is returned when an invalid state transition happens from one
// state to another.
type stateTransitionError struct {
	From string
	To   string
}

func (s *stateTransitionError) Error() string {
	return fmt.Sprintf("invalid state transition from %s to %s", s.From, s.To)
}

type containerState interface {
	transition(containerState) error
	destroy() error
	status() Status
}

func destroy(c *linuxContainer) error {
	if !c.config.Namespaces.Contains(configs.NEWPID) {
		if err := signalAllProcesses(c.cgroupManager, unix.SIGKILL); err != nil {
			logrus.Warn(err)
		}
	}
	err := c.cgroupManager.Destroy()
	if rerr := os.RemoveAll(c.root); err == nil {
		err = rerr
	}
	c.initProcess = nil
	if herr := runPoststopHooks(c); err == nil {
		err = herr
	}
	c.state = &stoppedState{c: c}
	return err
}

func runPoststopHooks(c *linuxContainer) error {
	if c.config.Hooks != nil {
		s := configs.HookState{
			Version: c.config.Version,
			ID:      c.id,
			Bundle:  utils.SearchLabels(c.config.Labels, "bundle"),
		}
		for _, hook := range c.config.Hooks.Poststop {
			if err := hook.Run(s); err != nil {
				return err
			}
		}
	}
	return nil
}

// stoppedState represents a container in a stopped/destroyed state.
type stoppedState struct {
	c *linuxContainer
}

func (b *stoppedState) status() Status {
	return Stopped
}

func (b *stoppedState) transition(s containerState) error {
	switch s.(type) {
	case *runningState, *restoredState:
		b.c.state = s
		return nil
	case *stoppedState:
		return nil
	}
	return newStateTransitionError(b, s)
}

func (b *stoppedState) destroy() error {
	return destroy(b.c)
}

// runningState represents a container that is currently running.
type runningState struct {
	c *linuxContainer
}

func (r *runningState) status() Status {
	return Running
}

func (r *runningState) transition(s containerState) error {
	switch s.(type) {
	case *stoppedState:
		t, err := r.c.runType()
		if err != nil {
			return err
		}
		if t == Running {
			return newGenericError(fmt.Errorf("container still running"), ContainerNotStopped)
		}
		r.c.state = s
		return nil
	case *pausedState:
		r.c.state = s
		return nil
	case *runningState:
		return nil
	}
	return newStateTransitionError(r, s)
}
func (r *runningState) destroy() error {
	t, err := r.c.runType()
	if err != nil {
		return err
	}
	if t == Running {
		return newGenericError(fmt.Errorf("container is not destroyed"), ContainerNotStopped)
	}
	return destroy(r.c)
}

type createdState struct {
	c *linuxContainer
}

func (i *createdState) status() Status {
	return Created
}

func (i *createdState) transition(s containerState) error {
	switch s.(type) {
	case *runningState, *pausedState, *stoppedState:
		i.c.state = s
		return nil
	case *createdState:
		return nil
	}
	return newStateTransitionError(i, s)
}

func (i *createdState) destroy() error {
	i.c.initProcess.signal(unix.SIGKILL)
	return destroy(i.c)
}

// pausedState represents a container that is currently paused. It cannot be destroyed in a
// paused state and must transition back to running first.
type pausedState struct {
	c *linuxContainer
}

func (p *pausedState) status() Status {
	return Paused
}

func (p *pausedState) transition(s containerState) error {
	switch s.(type) {
	case *runningState, *stoppedState:
		p.c.state = s
		return nil
	case *pausedState:
		return nil
	}
	return newStateTransitionError(p, s)
}

func (p *pausedState) destroy() error {
	t, err := p.c.runType()
	if err != nil {
		return err
	}
	if t != Running && t != Created {
		if err := p.c.cgroupManager.Freeze(configs.Thawed); err != nil {
			return err
		}
		return destroy(p.c)
	}
	return newGenericError(fmt.Errorf("container is paused"), ContainerPaused)
}

// restoredState is the same as the running state but also has associated checkpoint
// information that may need to be destroyed when the container is stopped and destroy is called.
type restoredState struct {
	imageDir string
	c        *linuxContainer
}

func (r *restoredState) status() Status {
	return Running
}

func (r *restoredState) transition(s containerState) error {
	switch s.(type) {
	case *stoppedState, *runningState:
		return nil
	}
	return newStateTransitionError(r, s)
}

func (r *restoredState) destroy() error {
	if _, err := os.Stat(filepath.Join(r.c.root, "checkpoint")); err != nil {
		if !os.IsNotExist(err) {
			return err
		}
	}
	return destroy(r.c)
}

// loadedState is used whenever a container is restored, loaded, or running additional
// processes inside, and it should not be destroyed when it is exiting.
type loadedState struct {
	c *linuxContainer
	s Status
}

func (n *loadedState) status() Status {
	return n.s
}

func (n *loadedState) transition(s containerState) error {
	n.c.state = s
	return nil
}

func (n *loadedState) destroy() error {
	if err := n.c.refreshState(); err != nil {
		return err
	}
	return n.c.state.destroy()
}
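
The state types above form a small finite-state machine: each concrete state whitelists the states it may move to and rejects everything else with stateTransitionError. A distilled standalone sketch of the same pattern with stand-in types and simplified transition rules (illustration only, not the vendored types or their exact transition table):

package main

import "fmt"

type state interface {
	name() string
	transition(to state) error
}

type stopped struct{}
type running struct{}

func (stopped) name() string { return "stopped" }
func (running) name() string { return "running" }

// In this simplified sketch, stopped may only move to running.
func (s stopped) transition(to state) error {
	if _, ok := to.(running); ok {
		return nil
	}
	return fmt.Errorf("invalid state transition from %s to %s", s.name(), to.name())
}

// In this simplified sketch, running may only move to stopped.
func (r running) transition(to state) error {
	if _, ok := to.(stopped); ok {
		return nil
	}
	return fmt.Errorf("invalid state transition from %s to %s", r.name(), to.name())
}

func main() {
	fmt.Println(stopped{}.transition(running{})) // <nil>
	fmt.Println(running{}.transition(running{})) // invalid state transition from running to running
}
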
15
vendor/github.com/opencontainers/runc/libcontainer/stats.go
generated
vendored
15
vendor/github.com/opencontainers/runc/libcontainer/stats.go
generated
vendored
@@ -1,15 +0,0 @@
package libcontainer

type NetworkInterface struct {
	// Name is the name of the network interface.
	Name string

	RxBytes   uint64
	RxPackets uint64
	RxErrors  uint64
	RxDropped uint64
	TxBytes   uint64
	TxPackets uint64
	TxErrors  uint64
	TxDropped uint64
}
5
vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go
generated
vendored
5
vendor/github.com/opencontainers/runc/libcontainer/stats_freebsd.go
generated
vendored
@@ -1,5 +0,0 @@
package libcontainer

type Stats struct {
	Interfaces []*NetworkInterface
}
8
vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go
generated
vendored
8
vendor/github.com/opencontainers/runc/libcontainer/stats_linux.go
generated
vendored
@@ -1,8 +0,0 @@
package libcontainer

import "github.com/opencontainers/runc/libcontainer/cgroups"

type Stats struct {
	Interfaces  []*NetworkInterface
	CgroupStats *cgroups.Stats
}
7
vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go
generated
vendored
7
vendor/github.com/opencontainers/runc/libcontainer/stats_solaris.go
generated
vendored
@@ -1,7 +0,0 @@
package libcontainer

// Solaris - TODO

type Stats struct {
	Interfaces []*NetworkInterface
}
5
vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go
generated
vendored
5
vendor/github.com/opencontainers/runc/libcontainer/stats_windows.go
generated
vendored
@@ -1,5 +0,0 @@
package libcontainer

type Stats struct {
	Interfaces []*NetworkInterface
}