Bump deps

This commit is contained in:
Jeff Mitchell
2017-09-05 18:06:47 -04:00
parent 7cd68af8f3
commit b0e29bb2ec
465 changed files with 37332 additions and 20695 deletions

View File

@@ -20,6 +20,7 @@ package optional
import ( import (
"fmt" "fmt"
"strings" "strings"
"time"
) )
type ( type (
@@ -37,6 +38,9 @@ type (
// Float64 is either a float64 or nil. // Float64 is either a float64 or nil.
Float64 interface{} Float64 interface{}
// Duration is either a time.Duration or nil.
Duration interface{}
) )
// ToBool returns its argument as a bool. // ToBool returns its argument as a bool.
@@ -89,6 +93,16 @@ func ToFloat64(v Float64) float64 {
return x return x
} }
// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
	if d, ok := v.(time.Duration); ok {
		return d
	}
	doPanic("Duration", v)
	return 0 // unreachable: doPanic always panics
}
func doPanic(capType string, v interface{}) { func doPanic(capType string, v interface{}) {
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v)) panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
} }

View File

@@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved. // Copyright 2014 Google Inc. All Rights Reserved.
// //
// Licensed under the Apache License, Version 2.0 (the "License"); // Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License. // you may not use this file except in compliance with the License.
@@ -71,6 +71,11 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
bkt = &raw.Bucket{} bkt = &raw.Bucket{}
} }
bkt.Name = b.name bkt.Name = b.name
// If there is lifecycle information but no location, explicitly set
// the location. This is a GCS quirk/bug.
if bkt.Location == "" && bkt.Lifecycle != nil {
bkt.Location = "US"
}
req := b.c.raw.Buckets.Insert(projectID, bkt) req := b.c.raw.Buckets.Insert(projectID, bkt)
setClientHeader(req.Header()) setClientHeader(req.Header())
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err }) return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
@@ -230,6 +235,98 @@ type BucketAttrs struct {
// RequesterPays reports whether the bucket is a Requester Pays bucket. // RequesterPays reports whether the bucket is a Requester Pays bucket.
RequesterPays bool RequesterPays bool
// Lifecycle is the lifecycle configuration for objects in the bucket.
Lifecycle Lifecycle
}
// Lifecycle is the lifecycle configuration for objects in the bucket.
type Lifecycle struct {
Rules []LifecycleRule
}
const (
	// rfc3339Date is an RFC3339 date with only the date segment, used
	// for CreatedBefore in LifecycleRule.
	rfc3339Date = "2006-01-02"

	// DeleteAction is a lifecycle action that deletes live and/or
	// archived objects. Takes precedence over SetStorageClass actions.
	DeleteAction = "Delete"

	// SetStorageClassAction changes the storage class of live and/or
	// archived objects.
	SetStorageClassAction = "SetStorageClass"
)
// LifecycleRule is a lifecycle configuration rule.
//
// When all the configured conditions are met by an object in the bucket, the
// configured action will automatically be taken on that object.
type LifecycleRule struct {
// Action is the action to take when all of the associated conditions are
// met.
Action LifecycleAction
// Condition is the set of conditions that must be met for the associated
// action to be taken.
Condition LifecycleCondition
}
// LifecycleAction is a lifecycle configuration action.
type LifecycleAction struct {
// Type is the type of action to take on matching objects.
//
// Acceptable values are "Delete" to delete matching objects and
// "SetStorageClass" to set the storage class defined in StorageClass on
// matching objects.
Type string
// StorageClass is the storage class to set on matching objects if the Action
// is "SetStorageClass".
StorageClass string
}
// Liveness specifies whether the object is live or not.
// Relevant only for versioned objects (see LifecycleCondition.Liveness).
type Liveness int

const (
	// LiveAndArchived includes both live and archived objects.
	// It is the zero value of Liveness, so an unset
	// LifecycleCondition.Liveness matches both.
	LiveAndArchived Liveness = iota
	// Live specifies that the object is still live.
	Live
	// Archived specifies that the object is archived.
	Archived
)
// LifecycleCondition is a set of conditions used to match objects and take an
// action automatically.
//
// All configured conditions must be met for the associated action to be taken.
type LifecycleCondition struct {
// AgeInDays is the age of the object in days.
AgeInDays int64
// CreatedBefore is the time the object was created.
//
// This condition is satisfied when an object is created before midnight of
// the specified date in UTC.
CreatedBefore time.Time
// Liveness specifies the object's liveness. Relevant only for versioned objects.
Liveness Liveness
// MatchesStorageClasses is the condition matching the object's storage
// class.
//
// Values include "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE",
// "STANDARD", and "DURABLE_REDUCED_AVAILABILITY".
MatchesStorageClasses []string
// NumNewerVersions is the condition matching objects with a number of newer versions.
//
// If the value is N, this condition is satisfied when there are at least N
// versions (including the live version) newer than this version of the
// object.
NumNewerVersions int64
} }
func newBucket(b *raw.Bucket) *BucketAttrs { func newBucket(b *raw.Bucket) *BucketAttrs {
@@ -245,6 +342,7 @@ func newBucket(b *raw.Bucket) *BucketAttrs {
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled, VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
Labels: b.Labels, Labels: b.Labels,
RequesterPays: b.Billing != nil && b.Billing.RequesterPays, RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
Lifecycle: toLifecycle(b.Lifecycle),
} }
acl := make([]ACLRule, len(b.Acl)) acl := make([]ACLRule, len(b.Acl))
for i, rule := range b.Acl { for i, rule := range b.Acl {
@@ -306,6 +404,7 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
Versioning: v, Versioning: v,
Labels: labels, Labels: labels,
Billing: bb, Billing: bb,
Lifecycle: toRawLifecycle(b.Lifecycle),
} }
} }
@@ -437,6 +536,75 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{})
return nil return nil
} }
// toRawLifecycle converts a client-side Lifecycle to its raw API
// representation. An empty rule set converts to nil, which omits the
// lifecycle configuration from the request entirely.
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
	if len(l.Rules) == 0 {
		return nil
	}
	rl := &raw.BucketLifecycle{}
	for _, r := range l.Rules {
		cond := &raw.BucketLifecycleRuleCondition{
			Age:                 r.Condition.AgeInDays,
			MatchesStorageClass: r.Condition.MatchesStorageClasses,
			NumNewerVersions:    r.Condition.NumNewerVersions,
		}
		// IsLive is a tri-state *bool on the wire: nil applies the rule
		// to both live and archived objects.
		switch r.Condition.Liveness {
		case LiveAndArchived:
			cond.IsLive = nil
		case Live:
			cond.IsLive = googleapi.Bool(true)
		case Archived:
			cond.IsLive = googleapi.Bool(false)
		}
		if !r.Condition.CreatedBefore.IsZero() {
			// The API accepts a date-only RFC3339 string.
			cond.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date)
		}
		rl.Rule = append(rl.Rule, &raw.BucketLifecycleRule{
			Action: &raw.BucketLifecycleRuleAction{
				Type:         r.Action.Type,
				StorageClass: r.Action.StorageClass,
			},
			Condition: cond,
		})
	}
	return rl
}
// toLifecycle converts a raw lifecycle configuration to its client-side
// representation. A nil input yields the zero Lifecycle.
func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
	var l Lifecycle
	if rl == nil {
		return l
	}
	for _, rr := range rl.Rule {
		r := LifecycleRule{
			Action: LifecycleAction{
				Type:         rr.Action.Type,
				StorageClass: rr.Action.StorageClass,
			},
			Condition: LifecycleCondition{
				AgeInDays:             rr.Condition.Age,
				MatchesStorageClasses: rr.Condition.MatchesStorageClass,
				NumNewerVersions:      rr.Condition.NumNewerVersions,
			},
		}
		// IsLive is a tri-state *bool on the wire: nil means the rule
		// applies to both live and archived objects.
		switch {
		case rr.Condition.IsLive == nil:
			r.Condition.Liveness = LiveAndArchived
		case *rr.Condition.IsLive:
			r.Condition.Liveness = Live
		default:
			r.Condition.Liveness = Archived
		}
		if rr.Condition.CreatedBefore != "" {
			// The server sends a date-only RFC3339 string; on a parse
			// failure CreatedBefore is left as the zero time.
			r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore)
		}
		// BUG FIX: the original built r but never stored it, so the
		// returned Lifecycle always had an empty Rules slice.
		l.Rules = append(l.Rules, r)
	}
	return l
}
// Objects returns an iterator over the objects in the bucket that match the Query q. // Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done. // If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator { func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {

30
vendor/cloud.google.com/go/storage/go110.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.10
package storage
import "google.golang.org/api/googleapi"
// shouldRetry reports whether err is transient and the request that
// produced it should be retried (Go 1.10+ variant).
func shouldRetry(err error) bool {
	// Retry on 429 and 5xx, according to
	// https://cloud.google.com/storage/docs/exponential-backoff.
	if e, ok := err.(*googleapi.Error); ok {
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	}
	return false
}

View File

@@ -18,7 +18,6 @@ import (
"cloud.google.com/go/internal" "cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go" gax "github.com/googleapis/gax-go"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/api/googleapi"
) )
// runWithRetry calls the function until it returns nil or a non-retryable error, or // runWithRetry calls the function until it returns nil or a non-retryable error, or
@@ -29,13 +28,7 @@ func runWithRetry(ctx context.Context, call func() error) error {
if err == nil { if err == nil {
return true, nil return true, nil
} }
e, ok := err.(*googleapi.Error) if shouldRetry(err) {
if !ok {
return true, err
}
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
if e.Code == 429 || (e.Code >= 500 && e.Code < 600) {
return false, nil return false, nil
} }
return true, err return true, err

40
vendor/cloud.google.com/go/storage/not_go110.go generated vendored Normal file
View File

@@ -0,0 +1,40 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.10
package storage
import (
"net/url"
"strings"
"google.golang.org/api/googleapi"
)
// shouldRetry reports whether err is transient and the request that
// produced it should be retried (pre-Go 1.10 variant).
func shouldRetry(err error) bool {
	switch e := err.(type) {
	case *googleapi.Error:
		// Retry on 429 and 5xx, according to
		// https://cloud.google.com/storage/docs/exponential-backoff.
		return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
	case *url.Error:
		// Retry on REFUSED_STREAM. Unfortunately the error type is
		// unexported, so we resort to string matching.
		return strings.Contains(e.Error(), "REFUSED_STREAM")
	}
	return false
}

View File

@@ -28,6 +28,7 @@ type Reader struct {
body io.ReadCloser body io.ReadCloser
remain, size int64 remain, size int64
contentType string contentType string
cacheControl string
checkCRC bool // should we check the CRC? checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc gotCRC uint32 // running crc
@@ -72,3 +73,8 @@ func (r *Reader) Remain() int64 {
func (r *Reader) ContentType() string { func (r *Reader) ContentType() string {
return r.contentType return r.contentType
} }
// CacheControl returns the cache control of the object.
func (r *Reader) CacheControl() string {
return r.cacheControl
}

View File

@@ -36,7 +36,7 @@ import (
"unicode/utf8" "unicode/utf8"
"google.golang.org/api/option" "google.golang.org/api/option"
"google.golang.org/api/transport" htransport "google.golang.org/api/transport/http"
"cloud.google.com/go/internal/optional" "cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/version" "cloud.google.com/go/internal/version"
@@ -89,7 +89,7 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
option.WithUserAgent(userAgent), option.WithUserAgent(userAgent),
} }
opts = append(o, opts...) opts = append(o, opts...)
hc, ep, err := transport.NewHTTPClient(ctx, opts...) hc, ep, err := htransport.NewClient(ctx, opts...)
if err != nil { if err != nil {
return nil, fmt.Errorf("dialing: %v", err) return nil, fmt.Errorf("dialing: %v", err)
} }
@@ -567,12 +567,13 @@ func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64)
crc, checkCRC = parseCRC32c(res) crc, checkCRC = parseCRC32c(res)
} }
return &Reader{ return &Reader{
body: body, body: body,
size: size, size: size,
remain: remain, remain: remain,
contentType: res.Header.Get("Content-Type"), contentType: res.Header.Get("Content-Type"),
wantCRC: crc, cacheControl: res.Header.Get("Cache-Control"),
checkCRC: checkCRC, wantCRC: crc,
checkCRC: checkCRC,
}, nil }, nil
} }
@@ -720,11 +721,16 @@ type ObjectAttrs struct {
// sent in the response headers. // sent in the response headers.
ContentDisposition string ContentDisposition string
// MD5 is the MD5 hash of the object's content. This field is read-only. // MD5 is the MD5 hash of the object's content. This field is read-only,
// except when used from a Writer. If set on a Writer, the uploaded
// data is rejected if its MD5 hash does not match this field.
MD5 []byte MD5 []byte
// CRC32C is the CRC32 checksum of the object's content using // CRC32C is the CRC32 checksum of the object's content using
// the Castagnoli93 polynomial. This field is read-only. // the Castagnoli93 polynomial. This field is read-only, except when
// used from a Writer. If set on a Writer and Writer.SendCRC32C
// is true, the uploaded data is rejected if its CRC32c hash does not
// match this field.
CRC32C uint32 CRC32C uint32
// MediaLink is an URL to the object's content. This field is read-only. // MediaLink is an URL to the object's content. This field is read-only.
@@ -812,26 +818,27 @@ func newObject(o *raw.Object) *ObjectAttrs {
sha256 = o.CustomerEncryption.KeySha256 sha256 = o.CustomerEncryption.KeySha256
} }
return &ObjectAttrs{ return &ObjectAttrs{
Bucket: o.Bucket, Bucket: o.Bucket,
Name: o.Name, Name: o.Name,
ContentType: o.ContentType, ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage, ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl, CacheControl: o.CacheControl,
ACL: acl, ACL: acl,
Owner: owner, Owner: owner,
ContentEncoding: o.ContentEncoding, ContentEncoding: o.ContentEncoding,
Size: int64(o.Size), ContentDisposition: o.ContentDisposition,
MD5: md5, Size: int64(o.Size),
CRC32C: crc32c, MD5: md5,
MediaLink: o.MediaLink, CRC32C: crc32c,
Metadata: o.Metadata, MediaLink: o.MediaLink,
Generation: o.Generation, Metadata: o.Metadata,
Metageneration: o.Metageneration, Generation: o.Generation,
StorageClass: o.StorageClass, Metageneration: o.Metageneration,
CustomerKeySHA256: sha256, StorageClass: o.StorageClass,
Created: convertTime(o.TimeCreated), CustomerKeySHA256: sha256,
Deleted: convertTime(o.TimeDeleted), Created: convertTime(o.TimeCreated),
Updated: convertTime(o.Updated), Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),
} }
} }

View File

@@ -36,6 +36,8 @@ type Writer struct {
// SendCRC specifies whether to transmit a CRC32C field. It should be set // SendCRC specifies whether to transmit a CRC32C field. It should be set
// to true in addition to setting the Writer's CRC32C field, because zero // to true in addition to setting the Writer's CRC32C field, because zero
// is a valid CRC and normally a zero would not be transmitted. // is a valid CRC and normally a zero would not be transmitted.
// If a CRC32C is sent, and the data written does not match the checksum,
// the write will be rejected.
SendCRC32C bool SendCRC32C bool
// ChunkSize controls the maximum number of bytes of the object that the // ChunkSize controls the maximum number of bytes of the object that the

5
vendor/github.com/Azure/azure-sdk-for-go/NOTICE generated vendored Normal file
View File

@@ -0,0 +1,5 @@
Microsoft Azure-SDK-for-Go
Copyright 2014-2017 Microsoft
This product includes software developed at
the Microsoft Corporation (https://www.microsoft.com).

View File

@@ -11,6 +11,8 @@ import (
// PutAppendBlob initializes an empty append blob with specified name. An // PutAppendBlob initializes an empty append blob with specified name. An
// append blob must be created using this method before appending blocks. // append blob must be created using this method before appending blocks.
// //
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutAppendBlob(options *PutBlobOptions) error { func (b *Blob) PutAppendBlob(options *PutBlobOptions) error {
params := url.Values{} params := url.Values{}

View File

@@ -50,7 +50,7 @@ func (c *Client) addAuthorizationHeader(verb, url string, headers map[string]str
} }
func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) { func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth authentication) (string, error) {
canRes, err := c.buildCanonicalizedResource(url, auth) canRes, err := c.buildCanonicalizedResource(url, auth, false)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -62,15 +62,18 @@ func (c *Client) getSharedKey(verb, url string, headers map[string]string, auth
return c.createAuthorizationHeader(canString, auth), nil return c.createAuthorizationHeader(canString, auth), nil
} }
func (c *Client) buildCanonicalizedResource(uri string, auth authentication) (string, error) { func (c *Client) buildCanonicalizedResource(uri string, auth authentication, sas bool) (string, error) {
errMsg := "buildCanonicalizedResource error: %s" errMsg := "buildCanonicalizedResource error: %s"
u, err := url.Parse(uri) u, err := url.Parse(uri)
if err != nil { if err != nil {
return "", fmt.Errorf(errMsg, err.Error()) return "", fmt.Errorf(errMsg, err.Error())
} }
cr := bytes.NewBufferString("/") cr := bytes.NewBufferString("")
cr.WriteString(c.getCanonicalizedAccountName()) if c.accountName != StorageEmulatorAccountName || !sas {
cr.WriteString("/")
cr.WriteString(c.getCanonicalizedAccountName())
}
if len(u.Path) > 0 { if len(u.Path) > 0 {
// Any portion of the CanonicalizedResource string that is derived from // Any portion of the CanonicalizedResource string that is derived from

View File

@@ -182,6 +182,9 @@ func (br BlobRange) String() string {
// Get returns a stream to read the blob. Caller must call both Read and Close() // Get returns a stream to read the blob. Caller must call both Read and Close()
// to correctly close the underlying connection. // to correctly close the underlying connection.
//
// See the GetRange method for use with a Range header.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Get-Blob
func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) { func (b *Blob) Get(options *GetBlobOptions) (io.ReadCloser, error) {
rangeOptions := GetBlobRangeOptions{ rangeOptions := GetBlobRangeOptions{

View File

@@ -19,7 +19,7 @@ func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions st
signedPermissions = permissions signedPermissions = permissions
blobURL = b.GetURL() blobURL = b.GetURL()
) )
canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(blobURL, b.Container.bsc.auth) canonicalizedResource, err := b.Container.bsc.client.buildCanonicalizedResource(blobURL, b.Container.bsc.auth, true)
if err != nil { if err != nil {
return "", err return "", err
} }
@@ -44,7 +44,7 @@ func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions st
signedResource = "b" signedResource = "b"
} }
protocols := "https,http" protocols := ""
if HTTPSOnly { if HTTPSOnly {
protocols = "https" protocols = "https"
} }
@@ -63,7 +63,9 @@ func (b *Blob) GetSASURIWithSignedIPAndProtocol(expiry time.Time, permissions st
} }
if b.Container.bsc.client.apiVersion >= "2015-04-05" { if b.Container.bsc.client.apiVersion >= "2015-04-05" {
sasParams.Add("spr", protocols) if protocols != "" {
sasParams.Add("spr", protocols)
}
if signedIPRange != "" { if signedIPRange != "" {
sasParams.Add("sip", signedIPRange) sasParams.Add("sip", signedIPRange)
} }

View File

@@ -68,6 +68,8 @@ type BlockResponse struct {
// CreateBlockBlob initializes an empty block blob with no blocks. // CreateBlockBlob initializes an empty block blob with no blocks.
// //
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error { func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
return b.CreateBlockBlobFromReader(nil, options) return b.CreateBlockBlobFromReader(nil, options)
@@ -77,10 +79,17 @@ func (b *Blob) CreateBlockBlob(options *PutBlobOptions) error {
// reader. Size must be the number of bytes read from reader. To // reader. Size must be the number of bytes read from reader. To
// create an empty blob, use size==0 and reader==nil. // create an empty blob, use size==0 and reader==nil.
// //
// Any headers set in blob.Properties or metadata in blob.Metadata
// will be set on the blob.
//
// The API rejects requests with size > 256 MiB (but this limit is not // The API rejects requests with size > 256 MiB (but this limit is not
// checked by the SDK). To write a larger blob, use CreateBlockBlob, // checked by the SDK). To write a larger blob, use CreateBlockBlob,
// PutBlock, and PutBlockList. // PutBlock, and PutBlockList.
// //
// To create a blob from scratch, call container.GetBlobReference() to
// get an empty blob, fill in blob.Properties and blob.Metadata as
// appropriate then call this method.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error { func (b *Blob) CreateBlockBlobFromReader(blob io.Reader, options *PutBlobOptions) error {
params := url.Values{} params := url.Values{}

View File

@@ -398,8 +398,17 @@ func (c Client) exec(verb, url string, headers map[string]string, body io.Reader
return nil, errors.New("azure/storage: error creating request: " + err.Error()) return nil, errors.New("azure/storage: error creating request: " + err.Error())
} }
// if a body was provided ensure that the content length was set.
// http.NewRequest() will automatically do this for a handful of types
// and for those that it doesn't we will handle here.
if body != nil && req.ContentLength < 1 {
if lr, ok := body.(*io.LimitedReader); ok {
setContentLengthFromLimitedReader(req, lr)
}
}
for k, v := range headers { for k, v := range headers {
req.Header.Add(k, v) req.Header[k] = append(req.Header[k], v) // Must bypass case munging present in `Add` by using map functions directly. See https://github.com/Azure/azure-sdk-for-go/issues/645
} }
resp, err := c.Sender.Send(&c, req) resp, err := c.Sender.Send(&c, req)

View File

@@ -50,7 +50,7 @@ type IncrementalCopyOptionsConditions struct {
// Copy starts a blob copy operation and waits for the operation to // Copy starts a blob copy operation and waits for the operation to
// complete. sourceBlob parameter must be a canonical URL to the blob (can be // complete. sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using GetBlobURL method.) There is no SLA on blob copy and therefore // obtained using the GetURL method.) There is no SLA on blob copy and therefore
// this helper method works faster on smaller files. // this helper method works faster on smaller files.
// //
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
@@ -65,7 +65,7 @@ func (b *Blob) Copy(sourceBlob string, options *CopyOptions) error {
// StartCopy starts a blob copy operation. // StartCopy starts a blob copy operation.
// sourceBlob parameter must be a canonical URL to the blob (can be // sourceBlob parameter must be a canonical URL to the blob (can be
// obtained using GetBlobURL method.) // obtained using the GetURL method.)
// //
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Copy-Blob
func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) { func (b *Blob) StartCopy(sourceBlob string, options *CopyOptions) (string, error) {

View File

@@ -4,6 +4,7 @@ import (
"encoding/xml" "encoding/xml"
"net/http" "net/http"
"net/url" "net/url"
"sync"
) )
// Directory represents a directory on a share. // Directory represents a directory on a share.
@@ -169,6 +170,7 @@ func (d *Directory) GetFileReference(name string) *File {
Name: name, Name: name,
parent: d, parent: d,
share: d.share, share: d.share,
mutex: &sync.Mutex{},
} }
} }

View File

@@ -8,6 +8,7 @@ import (
"net/http" "net/http"
"net/url" "net/url"
"strconv" "strconv"
"sync"
) )
const fourMB = uint64(4194304) const fourMB = uint64(4194304)
@@ -22,6 +23,7 @@ type File struct {
Properties FileProperties `xml:"Properties"` Properties FileProperties `xml:"Properties"`
share *Share share *Share
FileCopyProperties FileCopyState FileCopyProperties FileCopyState
mutex *sync.Mutex
} }
// FileProperties contains various properties of a file. // FileProperties contains various properties of a file.
@@ -148,7 +150,9 @@ func (f *File) CopyFile(sourceURL string, options *FileRequestOptions) error {
return err return err
} }
f.updateEtagLastModifiedAndCopyHeaders(headers) f.updateEtagAndLastModified(headers)
f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
return nil return nil
} }
@@ -399,14 +403,6 @@ func (f *File) updateEtagAndLastModified(headers http.Header) {
f.Properties.LastModified = headers.Get("Last-Modified") f.Properties.LastModified = headers.Get("Last-Modified")
} }
// updates Etag, last modified date and x-ms-copy-id
func (f *File) updateEtagLastModifiedAndCopyHeaders(headers http.Header) {
f.Properties.Etag = headers.Get("Etag")
f.Properties.LastModified = headers.Get("Last-Modified")
f.FileCopyProperties.ID = headers.Get("X-Ms-Copy-Id")
f.FileCopyProperties.Status = headers.Get("X-Ms-Copy-Status")
}
// updates file properties from the specified HTTP header // updates file properties from the specified HTTP header
func (f *File) updateProperties(header http.Header) { func (f *File) updateProperties(header http.Header) {
size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64) size, err := strconv.ParseUint(header.Get("Content-Length"), 10, 64)
@@ -430,7 +426,7 @@ func (f *File) URL() string {
return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil) return f.fsc.client.getEndpoint(fileServiceName, f.buildPath(), nil)
} }
// WriteRangeOptions includes opptions for a write file range operation // WriteRangeOptions includes options for a write file range operation
type WriteRangeOptions struct { type WriteRangeOptions struct {
Timeout uint Timeout uint
ContentMD5 string ContentMD5 string
@@ -456,7 +452,11 @@ func (f *File) WriteRange(bytes io.Reader, fileRange FileRange, options *WriteRa
if err != nil { if err != nil {
return err return err
} }
// it's perfectly legal for multiple go routines to call WriteRange
// on the same *File (e.g. concurrently writing non-overlapping ranges)
// so we must take the file mutex before updating our properties.
f.mutex.Lock()
f.updateEtagAndLastModified(headers) f.updateEtagAndLastModified(headers)
f.mutex.Unlock()
return nil return nil
} }

View File

@@ -160,6 +160,8 @@ func (b *Blob) GetPageRanges(options *GetPageRangesOptions) (GetPageRangesRespon
// size in bytes (size must be aligned to a 512-byte boundary). A page blob must // size in bytes (size must be aligned to a 512-byte boundary). A page blob must
// be created using this method before writing pages. // be created using this method before writing pages.
// //
// See CreateBlockBlobFromReader for more info on creating blobs.
//
// See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob // See https://docs.microsoft.com/en-us/rest/api/storageservices/fileservices/Put-Blob
func (b *Blob) PutPageBlob(options *PutBlobOptions) error { func (b *Blob) PutPageBlob(options *PutBlobOptions) error {
if b.Properties.ContentLength%512 != 0 { if b.Properties.ContentLength%512 != 0 {

View File

@@ -136,7 +136,7 @@ func addTimeout(params url.Values, timeout uint) url.Values {
func addSnapshot(params url.Values, snapshot *time.Time) url.Values { func addSnapshot(params url.Values, snapshot *time.Time) url.Values {
if snapshot != nil { if snapshot != nil {
params.Add("snapshot", timeRfc1123Formatted(*snapshot)) params.Add("snapshot", snapshot.Format("2006-01-02T15:04:05.0000000Z"))
} }
return params return params
} }

View File

@@ -0,0 +1,12 @@
// +build !go1.8
package storage
import (
"io"
"net/http"
)
func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) {
req.ContentLength = lr.N
}

View File

@@ -0,0 +1,18 @@
// +build go1.8
package storage
import (
"io"
"io/ioutil"
"net/http"
)
func setContentLengthFromLimitedReader(req *http.Request, lr *io.LimitedReader) {
req.ContentLength = lr.N
snapshot := *lr
req.GetBody = func() (io.ReadCloser, error) {
r := snapshot
return ioutil.NopCloser(&r), nil
}
}

View File

@@ -33,6 +33,9 @@ const (
// managedIdentitySettingsPath is the path to the MSI Extension settings file (to discover the endpoint) // managedIdentitySettingsPath is the path to the MSI Extension settings file (to discover the endpoint)
managedIdentitySettingsPath = "/var/lib/waagent/ManagedIdentity-Settings" managedIdentitySettingsPath = "/var/lib/waagent/ManagedIdentity-Settings"
// metadataHeader is the header required by MSI extension
metadataHeader = "Metadata"
) )
var expirationBase time.Time var expirationBase time.Time
@@ -364,6 +367,9 @@ func (spt *ServicePrincipalToken) refreshInternal(resource string) error {
req.ContentLength = int64(len(s)) req.ContentLength = int64(len(s))
req.Header.Set(contentType, mimeTypeFormPost) req.Header.Set(contentType, mimeTypeFormPost)
if _, ok := spt.secret.(*ServicePrincipalMSISecret); ok {
req.Header.Set(metadataHeader, "true")
}
resp, err := spt.sender.Do(req) resp, err := spt.sender.Do(req)
if err != nil { if err != nil {
return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err) return fmt.Errorf("adal: Failed to execute the refresh request. Error = '%v'", err)

View File

@@ -3,10 +3,18 @@ package autorest
import ( import (
"fmt" "fmt"
"net/http" "net/http"
"net/url"
"strings"
"github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/adal"
) )
const (
	// bearerChallengeHeader is the response header that carries the
	// authentication challenge.
	bearerChallengeHeader = "Www-Authenticate"
	// bearer is the authentication scheme name expected in the challenge.
	bearer = "Bearer"
	// tenantID is the synthetic key under which the tenant (parsed from the
	// challenge's authorization URL) is stored in bearerChallenge.values.
	tenantID = "tenantID"
)
// Authorizer is the interface that provides a PrepareDecorator used to supply request // Authorizer is the interface that provides a PrepareDecorator used to supply request
// authorization. Most often, the Authorizer decorator runs last so it has access to the full // authorization. Most often, the Authorizer decorator runs last so it has access to the full
// state of the formed HTTP request. // state of the formed HTTP request.
@@ -55,3 +63,105 @@ func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator {
}) })
} }
} }
// BearerAuthorizerCallbackFunc is the authentication callback signature.
type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error)

// BearerAuthorizerCallback implements bearer authorization via a callback.
type BearerAuthorizerCallback struct {
	sender   Sender                       // used to probe the request for an auth challenge
	callback BearerAuthorizerCallbackFunc // invoked with the tenant/resource parsed from the challenge
}
// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback
// is invoked when the HTTP request is submitted.
func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback {
	bacb := &BearerAuthorizerCallback{sender: sender, callback: callback}
	if bacb.sender == nil {
		// default to a plain HTTP client when no sender is supplied
		bacb.sender = &http.Client{}
	}
	return bacb
}
// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value
// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback.
//
// By default, the token will be automatically refreshed through the Refresher interface.
func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator {
	return func(p Preparer) Preparer {
		return PreparerFunc(func(r *http.Request) (*http.Request, error) {
			// make a copy of the request and remove the body as it's not
			// required and avoids us having to create a copy of it.
			rCopy := *r
			removeRequestBody(&rCopy)

			resp, err := bacb.sender.Do(&rCopy)
			if err != nil {
				return r, err
			}
			// Always close the probe response body (not just on 401) so the
			// underlying transport can reuse the connection; the original
			// leaked it for every non-401 response.
			defer resp.Body.Close()

			if resp.StatusCode == http.StatusUnauthorized && hasBearerChallenge(resp) {
				bc, err := newBearerChallenge(resp)
				if err != nil {
					return r, err
				}
				if bacb.callback != nil {
					// obtain an authorizer for the challenge's tenant/resource
					// and re-prepare the original request with it.
					ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"])
					if err != nil {
						return r, err
					}
					return ba.WithAuthorization()(p).Prepare(r)
				}
			}
			return r, nil
		})
	}
}
// hasBearerChallenge reports whether the HTTP response carries a bearer
// authentication challenge in its Www-Authenticate header.
func hasBearerChallenge(resp *http.Response) bool {
	header := resp.Header.Get(bearerChallengeHeader)
	return len(header) != 0 && strings.Contains(header, bearer)
}
// bearerChallenge holds the key/value parameters parsed from a bearer
// authentication challenge header.
type bearerChallenge struct {
	values map[string]string
}

// newBearerChallenge parses the Www-Authenticate bearer challenge from resp
// into its comma-delimited key=value pairs. For the authorization URL pair it
// stores the tenant (the URL path) under the tenantID key instead.
func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) {
	challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader))
	// Guard against a header that contains the scheme but no parameters
	// (e.g. just "Bearer"); the unchecked slice below would panic on it.
	if len(challenge) < len(bearer)+1 {
		err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
		return bc, err
	}
	trimmedChallenge := challenge[len(bearer)+1:]

	// challenge is a set of key=value pairs that are comma delimited
	pairs := strings.Split(trimmedChallenge, ",")
	if len(pairs) < 1 {
		err = fmt.Errorf("challenge '%s' contains no pairs", challenge)
		return bc, err
	}

	bc.values = make(map[string]string)
	for i := range pairs {
		trimmedPair := strings.TrimSpace(pairs[i])
		pair := strings.Split(trimmedPair, "=")
		if len(pair) == 2 {
			// remove the enclosing quotes
			key := strings.Trim(pair[0], "\"")
			value := strings.Trim(pair[1], "\"")

			switch key {
			case "authorization", "authorization_uri":
				// strip the tenant ID from the authorization URL
				asURL, err := url.Parse(value)
				if err != nil {
					return bc, err
				}
				// TrimPrefix instead of Path[1:] avoids a panic when the
				// URL has an empty path.
				bc.values[tenantID] = strings.TrimPrefix(asURL.Path, "/")
			default:
				bc.values[key] = value
			}
		}
	}

	return bc, err
}

View File

@@ -35,6 +35,7 @@ var (
statusCodesForRetry = []int{ statusCodesForRetry = []int{
http.StatusRequestTimeout, // 408 http.StatusRequestTimeout, // 408
http.StatusTooManyRequests, // 429
http.StatusInternalServerError, // 500 http.StatusInternalServerError, // 500
http.StatusBadGateway, // 502 http.StatusBadGateway, // 502
http.StatusServiceUnavailable, // 503 http.StatusServiceUnavailable, // 503

View File

@@ -31,6 +31,9 @@ type DetailedError struct {
// Service Error is the response body of failed API in bytes // Service Error is the response body of failed API in bytes
ServiceError []byte ServiceError []byte
// Response is the response object that was returned during failure if applicable.
Response *http.Response
} }
// NewError creates a new Error conforming object from the passed packageType, method, and // NewError creates a new Error conforming object from the passed packageType, method, and
@@ -67,6 +70,7 @@ func NewErrorWithError(original error, packageType string, method string, resp *
Method: method, Method: method,
StatusCode: statusCode, StatusCode: statusCode,
Message: fmt.Sprintf(message, args...), Message: fmt.Sprintf(message, args...),
Response: resp,
} }
} }

View File

@@ -7,18 +7,6 @@ import (
"net/http" "net/http"
) )
// NOTE: the GetBody() method on the http.Request object is new in 1.8.
// at present we support 1.7 and 1.8 so for now the branches specific
// to 1.8 have been commented out.
// RetriableRequest provides facilities for retrying an HTTP request.
type RetriableRequest struct {
req *http.Request
//rc io.ReadCloser
br *bytes.Reader
reset bool
}
// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. // NewRetriableRequest returns a wrapper around an HTTP request that support retry logic.
func NewRetriableRequest(req *http.Request) *RetriableRequest { func NewRetriableRequest(req *http.Request) *RetriableRequest {
return &RetriableRequest{req: req} return &RetriableRequest{req: req}
@@ -29,49 +17,22 @@ func (rr *RetriableRequest) Request() *http.Request {
return rr.req return rr.req
} }
// Prepare signals that the request is about to be sent. func (rr *RetriableRequest) prepareFromByteReader() (err error) {
func (rr *RetriableRequest) Prepare() (err error) { // fall back to making a copy (only do this once)
// preserve the request body; this is to support retry logic as b := []byte{}
// the underlying transport will always close the reqeust body if rr.req.ContentLength > 0 {
if rr.req.Body != nil { b = make([]byte, rr.req.ContentLength)
if rr.reset { _, err = io.ReadFull(rr.req.Body, b)
/*if rr.rc != nil { if err != nil {
rr.req.Body = rr.rc return err
} else */if rr.br != nil {
_, err = rr.br.Seek(0, io.SeekStart)
}
rr.reset = false
if err != nil {
return err
}
} }
/*if rr.req.GetBody != nil { } else {
// this will allow us to preserve the body without having to b, err = ioutil.ReadAll(rr.req.Body)
// make a copy. note we need to do this on each iteration if err != nil {
rr.rc, err = rr.req.GetBody() return err
if err != nil {
return err
}
} else */if rr.br == nil {
// fall back to making a copy (only do this once)
b := []byte{}
if rr.req.ContentLength > 0 {
b = make([]byte, rr.req.ContentLength)
_, err = io.ReadFull(rr.req.Body, b)
if err != nil {
return err
}
} else {
b, err = ioutil.ReadAll(rr.req.Body)
if err != nil {
return err
}
}
rr.br = bytes.NewReader(b)
rr.req.Body = ioutil.NopCloser(rr.br)
} }
// indicates that the request body needs to be reset
rr.reset = true
} }
rr.br = bytes.NewReader(b)
rr.req.Body = ioutil.NopCloser(rr.br)
return err return err
} }

View File

@@ -0,0 +1,44 @@
// +build !go1.8
package autorest
import (
"bytes"
"net/http"
)
// RetriableRequest provides facilities for retrying an HTTP request.
type RetriableRequest struct {
	req   *http.Request // the request to (re)send
	br    *bytes.Reader // buffered copy of the body, built on first Prepare
	reset bool          // true when br must be rewound before the next send
}
// Prepare signals that the request is about to be sent.
// It buffers the request body into a seekable reader on first use (the
// underlying transport always closes the request body) and rewinds that
// buffer before each retry so the body can be re-sent.
func (rr *RetriableRequest) Prepare() (err error) {
	if rr.req.Body == nil {
		// nothing to preserve
		return err
	}
	if rr.reset {
		if rr.br != nil {
			// rewind the buffered body for the retry
			_, err = rr.br.Seek(0, 0 /*io.SeekStart*/)
		}
		rr.reset = false
		if err != nil {
			return err
		}
	}
	if rr.br == nil {
		// fall back to making a copy (only do this once)
		err = rr.prepareFromByteReader()
	}
	// the body will need rewinding before the next send
	rr.reset = true
	return err
}
func removeRequestBody(req *http.Request) {
req.Body = nil
req.ContentLength = 0
}

View File

@@ -0,0 +1,56 @@
// +build go1.8
package autorest
import (
"bytes"
"io"
"net/http"
)
// RetriableRequest provides facilities for retrying an HTTP request.
type RetriableRequest struct {
	req   *http.Request // the request to (re)send
	rc    io.ReadCloser // fresh body obtained from req.GetBody, when available
	br    *bytes.Reader // buffered copy of the body (fallback when GetBody is nil)
	reset bool          // true when the body must be restored before the next send
}
// Prepare signals that the request is about to be sent.
// It preserves the request body across sends (the underlying transport
// always closes it): preferably via req.GetBody, which yields a fresh body
// without copying; otherwise by buffering the body once into a bytes.Reader
// that is rewound before each retry.
func (rr *RetriableRequest) Prepare() (err error) {
	if rr.req.Body == nil {
		// nothing to preserve
		return err
	}
	if rr.reset {
		if rr.rc != nil {
			// restore the body obtained from GetBody on the previous send
			rr.req.Body = rr.rc
		} else if rr.br != nil {
			// rewind the buffered copy
			_, err = rr.br.Seek(0, io.SeekStart)
		}
		rr.reset = false
		if err != nil {
			return err
		}
	}
	if rr.req.GetBody != nil {
		// preserve the body without copying; must be refreshed on each send
		rr.rc, err = rr.req.GetBody()
		if err != nil {
			return err
		}
	} else if rr.br == nil {
		// fall back to making a copy (only do this once)
		err = rr.prepareFromByteReader()
	}
	// the body will need to be restored before the next send
	rr.reset = true
	return err
}
func removeRequestBody(req *http.Request) {
req.Body = nil
req.GetBody = nil
req.ContentLength = 0
}

View File

@@ -5,6 +5,7 @@ import (
"log" "log"
"math" "math"
"net/http" "net/http"
"strconv"
"time" "time"
) )
@@ -209,13 +210,31 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
if err != nil || !ResponseHasStatusCode(resp, codes...) { if err != nil || !ResponseHasStatusCode(resp, codes...) {
return resp, err return resp, err
} }
DelayForBackoff(backoff, attempt, r.Cancel) delayed := DelayWithRetryAfter(resp, r.Cancel)
if !delayed {
DelayForBackoff(backoff, attempt, r.Cancel)
}
} }
return resp, err return resp, err
}) })
} }
} }
// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in
// responses with status code 429
func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool {
retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After"))
if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 {
select {
case <-time.After(time.Duration(retryAfter) * time.Second):
return true
case <-cancel:
return false
}
}
return false
}
// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal // DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal
// to or greater than the specified duration, exponentially backing off between requests using the // to or greater than the specified duration, exponentially backing off between requests using the
// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the // supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the

View File

@@ -33,6 +33,18 @@ err = c.Count("request.count_total", 2, nil, 1)
DogStatsD accepts packets with multiple statsd payloads in them. Using the BufferingClient via `NewBufferingClient` will buffer up commands and send them when the buffer is reached or after 100msec. DogStatsD accepts packets with multiple statsd payloads in them. Using the BufferingClient via `NewBufferingClient` will buffer up commands and send them when the buffer is reached or after 100msec.
## Unix Domain Sockets Client
DogStatsD version 6 accepts packets through a Unix Socket datagram connection. You can use this protocol by giving a
`unix:///path/to/dsd.socket` addr argument to the `New` or `NewBufferingClient`.
With this protocol, writes can become blocking if the server's receiving buffer is full. Our default behaviour is to
timeout and drop the packet after 1 ms. You can set a custom timeout duration via the `SetWriteTimeout` method.
The default mode is to pass write errors from the socket to the caller. This includes write errors the library will
automatically recover from (DogStatsD server not ready yet or is restarting). You can drop these errors and emulate
the UDP behaviour by setting the `SkipErrors` property to `true`. Please note that packets will be dropped in both modes.
## Development ## Development
Run the tests with: Run the tests with:

View File

@@ -29,7 +29,6 @@ import (
"fmt" "fmt"
"io" "io"
"math/rand" "math/rand"
"net"
"strconv" "strconv"
"strings" "strings"
"sync" "sync"
@@ -55,6 +54,12 @@ any number greater than that will see frames being cut out.
*/ */
const MaxUDPPayloadSize = 65467 const MaxUDPPayloadSize = 65467
/*
UnixAddressPrefix holds the prefix to use to enable Unix Domain Socket
traffic instead of UDP.
*/
const UnixAddressPrefix = "unix://"
/* /*
Stat suffixes Stat suffixes
*/ */
@@ -68,34 +73,50 @@ var (
timingSuffix = []byte("|ms") timingSuffix = []byte("|ms")
) )
// A Client is a handle for sending udp messages to dogstatsd. It is safe to // A statsdWriter offers a standard interface regardless of the underlying
// protocol. For now UDS and UPD writers are available.
type statsdWriter interface {
Write(data []byte) error
SetWriteTimeout(time.Duration) error
Close() error
}
// A Client is a handle for sending messages to dogstatsd. It is safe to
// use one Client from multiple goroutines simultaneously. // use one Client from multiple goroutines simultaneously.
type Client struct { type Client struct {
conn net.Conn // Writer handles the underlying networking protocol
writer statsdWriter
// Namespace to prepend to all statsd calls // Namespace to prepend to all statsd calls
Namespace string Namespace string
// Tags are global tags to be added to every statsd call // Tags are global tags to be added to every statsd call
Tags []string Tags []string
// skipErrors turns off error passing and allows UDS to emulate UDP behaviour
SkipErrors bool
// BufferLength is the length of the buffer in commands. // BufferLength is the length of the buffer in commands.
bufferLength int bufferLength int
flushTime time.Duration flushTime time.Duration
commands []string commands []string
buffer bytes.Buffer buffer bytes.Buffer
stop bool stop chan struct{}
sync.Mutex sync.Mutex
} }
// New returns a pointer to a new Client given an addr in the format "hostname:port". // New returns a pointer to a new Client given an addr in the format "hostname:port" or
// "unix:///path/to/socket".
func New(addr string) (*Client, error) { func New(addr string) (*Client, error) {
udpAddr, err := net.ResolveUDPAddr("udp", addr) if strings.HasPrefix(addr, UnixAddressPrefix) {
w, err := newUdsWriter(addr[len(UnixAddressPrefix)-1:])
if err != nil {
return nil, err
}
client := &Client{writer: w}
return client, nil
}
w, err := newUdpWriter(addr)
if err != nil { if err != nil {
return nil, err return nil, err
} }
conn, err := net.DialUDP("udp", nil, udpAddr) client := &Client{writer: w, SkipErrors: false}
if err != nil {
return nil, err
}
client := &Client{conn: conn}
return client, nil return client, nil
} }
@@ -109,6 +130,7 @@ func NewBuffered(addr string, buflen int) (*Client, error) {
client.bufferLength = buflen client.bufferLength = buflen
client.commands = make([]string, 0, buflen) client.commands = make([]string, 0, buflen)
client.flushTime = time.Millisecond * 100 client.flushTime = time.Millisecond * 100
client.stop = make(chan struct{}, 1)
go client.watch() go client.watch()
return client, nil return client, nil
} }
@@ -148,17 +170,27 @@ func (c *Client) format(name string, value interface{}, suffix []byte, tags []st
return buf.String() return buf.String()
} }
// SetWriteTimeout allows the user to set a custom UDS write timeout. Not supported for UDP.
func (c *Client) SetWriteTimeout(d time.Duration) error {
return c.writer.SetWriteTimeout(d)
}
func (c *Client) watch() { func (c *Client) watch() {
for _ = range time.Tick(c.flushTime) { ticker := time.NewTicker(c.flushTime)
if c.stop {
for {
select {
case <-ticker.C:
c.Lock()
if len(c.commands) > 0 {
// FIXME: eating error here
c.flush()
}
c.Unlock()
case <-c.stop:
ticker.Stop()
return return
} }
c.Lock()
if len(c.commands) > 0 {
// FIXME: eating error here
c.flush()
}
c.Unlock()
} }
} }
@@ -228,7 +260,7 @@ func (c *Client) flush() error {
var err error var err error
cmdsFlushed := 0 cmdsFlushed := 0
for i, data := range frames { for i, data := range frames {
_, e := c.conn.Write(data) e := c.writer.Write(data)
if e != nil { if e != nil {
err = e err = e
break break
@@ -258,7 +290,11 @@ func (c *Client) sendMsg(msg string) error {
return c.append(msg) return c.append(msg)
} }
_, err := c.conn.Write([]byte(msg)) err := c.writer.Write([]byte(msg))
if c.SkipErrors {
return nil
}
return err return err
} }
@@ -353,8 +389,11 @@ func (c *Client) Close() error {
if c == nil { if c == nil {
return nil return nil
} }
c.stop = true select {
return c.conn.Close() case c.stop <- struct{}{}:
default:
}
return c.writer.Close()
} }
// Events support // Events support
@@ -482,7 +521,6 @@ func (e Event) Encode(tags ...string) (string, error) {
} }
// ServiceCheck support // ServiceCheck support
type ServiceCheckStatus byte type ServiceCheckStatus byte
const ( const (

41
vendor/github.com/DataDog/datadog-go/statsd/udp.go generated vendored Normal file
View File

@@ -0,0 +1,41 @@
package statsd
import (
"errors"
"net"
"time"
)
// udpWriter is an internal class wrapping around management of UDP connection
type udpWriter struct {
conn net.Conn
}
// New returns a pointer to a new udpWriter given an addr in the format "hostname:port".
func newUdpWriter(addr string) (*udpWriter, error) {
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}
conn, err := net.DialUDP("udp", nil, udpAddr)
if err != nil {
return nil, err
}
writer := &udpWriter{conn: conn}
return writer, nil
}
// SetWriteTimeout is not needed for UDP, returns error
func (w *udpWriter) SetWriteTimeout(d time.Duration) error {
return errors.New("SetWriteTimeout: not supported for UDP connections")
}
// Write data to the UDP connection with no error handling
func (w *udpWriter) Write(data []byte) error {
_, e := w.conn.Write(data)
return e
}
func (w *udpWriter) Close() error {
return w.conn.Close()
}

65
vendor/github.com/DataDog/datadog-go/statsd/uds.go generated vendored Normal file
View File

@@ -0,0 +1,65 @@
package statsd
import (
"net"
"strings"
"time"
)
/*
UDSTimeout holds the default timeout for UDS socket writes, as they can get
blocking when the receiving buffer is full.
*/
const defaultUDSTimeout = 1 * time.Millisecond
// udsWriter is an internal class wrapping around management of UDS connection
type udsWriter struct {
// Address to send metrics to, needed to allow reconnection on error
addr net.Addr
// Established connection object, or nil if not connected yet
conn net.Conn
// write timeout
writeTimeout time.Duration
}
// New returns a pointer to a new udsWriter given a socket file path as addr.
func newUdsWriter(addr string) (*udsWriter, error) {
udsAddr, err := net.ResolveUnixAddr("unixgram", addr)
if err != nil {
return nil, err
}
// Defer connection to first Write
writer := &udsWriter{addr: udsAddr, conn: nil, writeTimeout: defaultUDSTimeout}
return writer, nil
}
// SetWriteTimeout allows the user to set a custom write timeout
func (w *udsWriter) SetWriteTimeout(d time.Duration) error {
w.writeTimeout = d
return nil
}
// Write data to the UDS connection with write timeout and minimal error handling:
// create the connection if nil, and destroy it if the statsd server has disconnected
func (w *udsWriter) Write(data []byte) error {
// Try connecting (first packet or connection lost)
if w.conn == nil {
conn, err := net.Dial(w.addr.Network(), w.addr.String())
if err != nil {
return err
}
w.conn = conn
}
w.conn.SetWriteDeadline(time.Now().Add(w.writeTimeout))
_, e := w.conn.Write(data)
if e != nil && strings.Contains(e.Error(), "transport endpoint is not connected") {
// Statsd server disconnected, retry connecting at next packet
w.conn = nil
return e
}
return e
}
func (w *udsWriter) Close() error {
return w.conn.Close()
}

View File

@@ -68,10 +68,20 @@ func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
return &BackupStreamReader{r, 0} return &BackupStreamReader{r, 0}
} }
// Next returns the next backup stream and prepares for calls to Write(). It skips the remainder of the current stream if // Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
// it was not completely read. // it was not completely read.
func (r *BackupStreamReader) Next() (*BackupHeader, error) { func (r *BackupStreamReader) Next() (*BackupHeader, error) {
if r.bytesLeft > 0 { if r.bytesLeft > 0 {
if s, ok := r.r.(io.Seeker); ok {
// Make sure Seek on io.SeekCurrent sometimes succeeds
// before trying the actual seek.
if _, err := s.Seek(0, io.SeekCurrent); err == nil {
if _, err = s.Seek(r.bytesLeft, io.SeekCurrent); err != nil {
return nil, err
}
r.bytesLeft = 0
}
}
if _, err := io.Copy(ioutil.Discard, r); err != nil { if _, err := io.Copy(ioutil.Discard, r); err != nil {
return nil, err return nil, err
} }
@@ -220,7 +230,7 @@ type BackupFileWriter struct {
ctx uintptr ctx uintptr
} }
// NewBackupFileWrtier returns a new BackupFileWriter from a file handle. If includeSecurity is true, // NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
// Write() will attempt to restore the security descriptor from the stream. // Write() will attempt to restore the security descriptor from the stream.
func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter { func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
w := &BackupFileWriter{f, includeSecurity, 0} w := &BackupFileWriter{f, includeSecurity, 0}

137
vendor/github.com/Microsoft/go-winio/ea.go generated vendored Normal file
View File

@@ -0,0 +1,137 @@
package winio
import (
"bytes"
"encoding/binary"
"errors"
)
// fileFullEaInformation mirrors the Windows FILE_FULL_EA_INFORMATION header
// that precedes each extended-attribute entry in an EA buffer.
type fileFullEaInformation struct {
	NextEntryOffset uint32 // byte offset from this entry to the next; 0 for the last entry
	Flags           uint8
	NameLength      uint8  // length of the name in bytes, excluding the trailing NUL
	ValueLength     uint16 // length of the value in bytes
}

var (
	// fileFullEaInformationSize is the fixed on-wire size of the header.
	fileFullEaInformationSize = binary.Size(&fileFullEaInformation{})

	errInvalidEaBuffer = errors.New("invalid extended attribute buffer")
	errEaNameTooLarge  = errors.New("extended attribute name too large")
	errEaValueTooLarge = errors.New("extended attribute value too large")
)

// ExtendedAttribute represents a single Windows EA.
type ExtendedAttribute struct {
	Name  string
	Value []byte
	Flags uint8
}
// parseEa decodes the first FILE_FULL_EA_INFORMATION entry in b, returning
// the attribute and, when more entries follow, the remainder of the buffer
// in nb.
func parseEa(b []byte) (ea ExtendedAttribute, nb []byte, err error) {
	var info fileFullEaInformation
	if err = binary.Read(bytes.NewReader(b), binary.LittleEndian, &info); err != nil {
		err = errInvalidEaBuffer
		return
	}

	nameOffset := fileFullEaInformationSize
	nameLen := int(info.NameLength)
	valueOffset := nameOffset + nameLen + 1 // +1 skips the NUL after the name
	valueLen := int(info.ValueLength)
	nextOffset := int(info.NextEntryOffset)
	// A single bounds check suffices: valueOffset already bounds the name.
	if valueOffset+valueLen > len(b) || nextOffset < 0 || nextOffset > len(b) {
		err = errInvalidEaBuffer
		return
	}

	ea = ExtendedAttribute{
		Name:  string(b[nameOffset : nameOffset+nameLen]),
		Value: b[valueOffset : valueOffset+valueLen],
		Flags: info.Flags,
	}
	if info.NextEntryOffset != 0 {
		nb = b[info.NextEntryOffset:]
	}
	return
}
// DecodeExtendedAttributes decodes a list of EAs from a FILE_FULL_EA_INFORMATION
// buffer retrieved from BackupRead, ZwQueryEaFile, etc.
func DecodeExtendedAttributes(b []byte) (eas []ExtendedAttribute, err error) {
	for len(b) != 0 {
		var ea ExtendedAttribute
		ea, b, err = parseEa(b)
		if err != nil {
			return nil, err
		}
		eas = append(eas, ea)
	}
	return
}
// writeEa appends one FILE_FULL_EA_INFORMATION entry for attr to dst,
// zero-padding the entry to a 4-byte boundary. last marks the final entry,
// in which case NextEntryOffset is written as 0.
func writeEa(dst *bytes.Buffer, attr *ExtendedAttribute, last bool) error {
	// name and value lengths must fit their on-wire field widths
	if int(uint8(len(attr.Name))) != len(attr.Name) {
		return errEaNameTooLarge
	}
	if int(uint16(len(attr.Value))) != len(attr.Value) {
		return errEaValueTooLarge
	}

	entrySize := uint32(fileFullEaInformationSize + len(attr.Name) + 1 + len(attr.Value))
	withPadding := (entrySize + 3) &^ 3
	nextOffset := uint32(0)
	if !last {
		nextOffset = withPadding
	}

	info := fileFullEaInformation{
		NextEntryOffset: nextOffset,
		Flags:           attr.Flags,
		NameLength:      uint8(len(attr.Name)),
		ValueLength:     uint16(len(attr.Value)),
	}
	if err := binary.Write(dst, binary.LittleEndian, &info); err != nil {
		return err
	}
	if _, err := dst.Write([]byte(attr.Name)); err != nil {
		return err
	}
	// NUL terminator after the name
	if err := dst.WriteByte(0); err != nil {
		return err
	}
	if _, err := dst.Write(attr.Value); err != nil {
		return err
	}
	// up to three zero bytes of padding to the 4-byte boundary
	if _, err := dst.Write([]byte{0, 0, 0}[0 : withPadding-entrySize]); err != nil {
		return err
	}
	return nil
}
// EncodeExtendedAttributes encodes a list of EAs into a FILE_FULL_EA_INFORMATION
// buffer for use with BackupWrite, ZwSetEaFile, etc.
func EncodeExtendedAttributes(eas []ExtendedAttribute) ([]byte, error) {
	var buf bytes.Buffer
	for i := range eas {
		// the final entry gets NextEntryOffset == 0
		if err := writeEa(&buf, &eas[i], i == len(eas)-1); err != nil {
			return nil, err
		}
	}
	return buf.Bytes(), nil
}

View File

@@ -23,6 +23,13 @@ type atomicBool int32
func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 }
func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) } func (b *atomicBool) setFalse() { atomic.StoreInt32((*int32)(b), 0) }
func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) }
// swap atomically stores the given value and reports whether the previous
// value was true. The parameter is renamed from `new` to avoid shadowing
// the builtin of the same name.
func (b *atomicBool) swap(v bool) bool {
	var n int32
	if v {
		n = 1
	}
	return atomic.SwapInt32((*int32)(b), n) == 1
}
const ( const (
cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1
@@ -71,7 +78,8 @@ func initIo() {
type win32File struct { type win32File struct {
handle syscall.Handle handle syscall.Handle
wg sync.WaitGroup wg sync.WaitGroup
closing bool wgLock sync.RWMutex
closing atomicBool
readDeadline deadlineHandler readDeadline deadlineHandler
writeDeadline deadlineHandler writeDeadline deadlineHandler
} }
@@ -107,14 +115,18 @@ func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) {
// closeHandle closes the resources associated with a Win32 handle // closeHandle closes the resources associated with a Win32 handle
func (f *win32File) closeHandle() { func (f *win32File) closeHandle() {
if !f.closing { f.wgLock.Lock()
// Atomically set that we are closing, releasing the resources only once.
if !f.closing.swap(true) {
f.wgLock.Unlock()
// cancel all IO and wait for it to complete // cancel all IO and wait for it to complete
f.closing = true
cancelIoEx(f.handle, nil) cancelIoEx(f.handle, nil)
f.wg.Wait() f.wg.Wait()
// at this point, no new IO can start // at this point, no new IO can start
syscall.Close(f.handle) syscall.Close(f.handle)
f.handle = 0 f.handle = 0
} else {
f.wgLock.Unlock()
} }
} }
@@ -127,10 +139,13 @@ func (f *win32File) Close() error {
// prepareIo prepares for a new IO operation. // prepareIo prepares for a new IO operation.
// The caller must call f.wg.Done() when the IO is finished, prior to Close() returning. // The caller must call f.wg.Done() when the IO is finished, prior to Close() returning.
func (f *win32File) prepareIo() (*ioOperation, error) { func (f *win32File) prepareIo() (*ioOperation, error) {
f.wg.Add(1) f.wgLock.RLock()
if f.closing { if f.closing.isSet() {
f.wgLock.RUnlock()
return nil, ErrFileClosed return nil, ErrFileClosed
} }
f.wg.Add(1)
f.wgLock.RUnlock()
c := &ioOperation{} c := &ioOperation{}
c.ch = make(chan ioResult) c.ch = make(chan ioResult)
return c, nil return c, nil
@@ -159,7 +174,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
return int(bytes), err return int(bytes), err
} }
if f.closing { if f.closing.isSet() {
cancelIoEx(f.handle, &c.o) cancelIoEx(f.handle, &c.o)
} }
@@ -175,7 +190,7 @@ func (f *win32File) asyncIo(c *ioOperation, d *deadlineHandler, bytes uint32, er
case r = <-c.ch: case r = <-c.ch:
err = r.err err = r.err
if err == syscall.ERROR_OPERATION_ABORTED { if err == syscall.ERROR_OPERATION_ABORTED {
if f.closing { if f.closing.isSet() {
err = ErrFileClosed err = ErrFileClosed
} }
} }

View File

@@ -265,9 +265,9 @@ func (l *win32PipeListener) listenerRoutine() {
if err == nil { if err == nil {
// Wait for the client to connect. // Wait for the client to connect.
ch := make(chan error) ch := make(chan error)
go func() { go func(p *win32File) {
ch <- connectPipe(p) ch <- connectPipe(p)
}() }(p)
select { select {
case err = <-ch: case err = <-ch:
if err != nil { if err != nil {

View File

@@ -1,44 +0,0 @@
go-hdb
======
[![GoDoc](https://godoc.org/github.com/SAP/go-hdb/driver?status.png)](https://godoc.org/github.com/SAP/go-hdb/driver)
Go-hdb is a native Go (golang) HANA database driver for Go's sql package. It implements the SAP HANA SQL command network protocol:
<http://help.sap.com/hana/SAP_HANA_SQL_Command_Network_Protocol_Reference_en.pdf>
## Installation
```
go get github.com/SAP/go-hdb/driver
```
## Documentation
API documentation and documented examples can be found at <https://godoc.org/github.com/SAP/go-hdb/driver>.
## Tests
For running the driver tests a HANA Database server is required. The test user must have privileges to create a schema.
```
go test -dsn hdb://user:password@host:port
```
## Features
* Native Go implementation (no C libraries, CGO).
* Go <http://golang.org/pkg/database/sql> package compliant.
* Support of database/sql/driver Execer and Queryer interface for parameter free statements and queries.
* Support of bulk inserts.
* Support of UTF-8 to / from CESU-8 encodings for HANA Unicode types.
* Build-in support of HANA decimals as Go rational numbers <http://golang.org/pkg/math/big>.
* Support of Large Object streaming.
* Support of Stored Procedures with table output parameters.
## Dependencies
* <http://golang.org/x/text/transform>
## Todo
* Additional Authentication Methods (actually only basic authentication is supported).

View File

@@ -5,6 +5,7 @@ package circonus
import ( import (
"strings" "strings"
"github.com/armon/go-metrics"
cgm "github.com/circonus-labs/circonus-gometrics" cgm "github.com/circonus-labs/circonus-gometrics"
) )
@@ -61,6 +62,12 @@ func (s *CirconusSink) SetGauge(key []string, val float32) {
s.metrics.SetGauge(flatKey, int64(val)) s.metrics.SetGauge(flatKey, int64(val))
} }
// SetGaugeWithLabels sets value for a gauge metric with the given labels
func (s *CirconusSink) SetGaugeWithLabels(key []string, val float32, labels []metrics.Label) {
	s.metrics.SetGauge(s.flattenKeyLabels(key, labels), int64(val))
}
// EmitKey is not implemented in circonus // EmitKey is not implemented in circonus
func (s *CirconusSink) EmitKey(key []string, val float32) { func (s *CirconusSink) EmitKey(key []string, val float32) {
// NOP // NOP
@@ -72,12 +79,24 @@ func (s *CirconusSink) IncrCounter(key []string, val float32) {
s.metrics.IncrementByValue(flatKey, uint64(val)) s.metrics.IncrementByValue(flatKey, uint64(val))
} }
// IncrCounterWithLabels increments a counter metric with the given labels
func (s *CirconusSink) IncrCounterWithLabels(key []string, val float32, labels []metrics.Label) {
	s.metrics.IncrementByValue(s.flattenKeyLabels(key, labels), uint64(val))
}
// AddSample adds a sample to a histogram metric // AddSample adds a sample to a histogram metric
func (s *CirconusSink) AddSample(key []string, val float32) { func (s *CirconusSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key) flatKey := s.flattenKey(key)
s.metrics.RecordValue(flatKey, float64(val)) s.metrics.RecordValue(flatKey, float64(val))
} }
// AddSampleWithLabels adds a sample to a histogram metric with the given labels
func (s *CirconusSink) AddSampleWithLabels(key []string, val float32, labels []metrics.Label) {
	s.metrics.RecordValue(s.flattenKeyLabels(key, labels), float64(val))
}
// Flattens key to Circonus metric name // Flattens key to Circonus metric name
func (s *CirconusSink) flattenKey(parts []string) string { func (s *CirconusSink) flattenKey(parts []string) string {
joined := strings.Join(parts, "`") joined := strings.Join(parts, "`")
@@ -90,3 +109,11 @@ func (s *CirconusSink) flattenKey(parts []string) string {
} }
}, joined) }, joined)
} }
// Flattens the key along with labels for formatting, removes spaces
func (s *CirconusSink) flattenKeyLabels(parts []string, labels []metrics.Label) string {
for _, label := range labels {
parts = append(parts, label.Value)
}
return s.flattenKey(parts)
}

View File

@@ -5,6 +5,7 @@ import (
"strings" "strings"
"github.com/DataDog/datadog-go/statsd" "github.com/DataDog/datadog-go/statsd"
"github.com/armon/go-metrics"
) )
// DogStatsdSink provides a MetricSink that can be used // DogStatsdSink provides a MetricSink that can be used
@@ -45,46 +46,49 @@ func (s *DogStatsdSink) EnableHostNamePropagation() {
func (s *DogStatsdSink) flattenKey(parts []string) string { func (s *DogStatsdSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".") joined := strings.Join(parts, ".")
return strings.Map(func(r rune) rune { return strings.Map(sanitize, joined)
switch r {
case ':':
fallthrough
case ' ':
return '_'
default:
return r
}
}, joined)
} }
func (s *DogStatsdSink) parseKey(key []string) ([]string, []string) { func sanitize(r rune) rune {
switch r {
case ':':
fallthrough
case ' ':
return '_'
default:
return r
}
}
func (s *DogStatsdSink) parseKey(key []string) ([]string, []metrics.Label) {
// Since DogStatsd supports dimensionality via tags on metric keys, this sink's approach is to splice the hostname out of the key in favor of a `host` tag // Since DogStatsd supports dimensionality via tags on metric keys, this sink's approach is to splice the hostname out of the key in favor of a `host` tag
// The `host` tag is either forced here, or set downstream by the DogStatsd server // The `host` tag is either forced here, or set downstream by the DogStatsd server
var tags []string var labels []metrics.Label
hostName := s.hostName hostName := s.hostName
//Splice the hostname out of the key // Splice the hostname out of the key
for i, el := range key { for i, el := range key {
if el == hostName { if el == hostName {
key = append(key[:i], key[i+1:]...) key = append(key[:i], key[i+1:]...)
break
} }
} }
if s.propagateHostname { if s.propagateHostname {
tags = append(tags, fmt.Sprintf("host:%s", hostName)) labels = append(labels, metrics.Label{"host", hostName})
} }
return key, tags return key, labels
} }
// Implementation of methods in the MetricSink interface // Implementation of methods in the MetricSink interface
func (s *DogStatsdSink) SetGauge(key []string, val float32) { func (s *DogStatsdSink) SetGauge(key []string, val float32) {
s.SetGaugeWithTags(key, val, []string{}) s.SetGaugeWithLabels(key, val, nil)
} }
func (s *DogStatsdSink) IncrCounter(key []string, val float32) { func (s *DogStatsdSink) IncrCounter(key []string, val float32) {
s.IncrCounterWithTags(key, val, []string{}) s.IncrCounterWithLabels(key, val, nil)
} }
// EmitKey is not implemented since DogStatsd does not provide a metric type that holds an // EmitKey is not implemented since DogStatsd does not provide a metric type that holds an
@@ -93,33 +97,44 @@ func (s *DogStatsdSink) EmitKey(key []string, val float32) {
} }
func (s *DogStatsdSink) AddSample(key []string, val float32) { func (s *DogStatsdSink) AddSample(key []string, val float32) {
s.AddSampleWithTags(key, val, []string{}) s.AddSampleWithLabels(key, val, nil)
} }
// The following ...WithTags methods correspond to Datadog's Tag extension to Statsd. // The following ...WithLabels methods correspond to Datadog's Tag extension to Statsd.
// http://docs.datadoghq.com/guides/dogstatsd/#tags // http://docs.datadoghq.com/guides/dogstatsd/#tags
func (s *DogStatsdSink) SetGaugeWithLabels(key []string, val float32, labels []metrics.Label) {
func (s *DogStatsdSink) SetGaugeWithTags(key []string, val float32, tags []string) { flatKey, tags := s.getFlatkeyAndCombinedLabels(key, labels)
flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags)
rate := 1.0 rate := 1.0
s.client.Gauge(flatKey, float64(val), tags, rate) s.client.Gauge(flatKey, float64(val), tags, rate)
} }
func (s *DogStatsdSink) IncrCounterWithTags(key []string, val float32, tags []string) { func (s *DogStatsdSink) IncrCounterWithLabels(key []string, val float32, labels []metrics.Label) {
flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags) flatKey, tags := s.getFlatkeyAndCombinedLabels(key, labels)
rate := 1.0 rate := 1.0
s.client.Count(flatKey, int64(val), tags, rate) s.client.Count(flatKey, int64(val), tags, rate)
} }
func (s *DogStatsdSink) AddSampleWithTags(key []string, val float32, tags []string) { func (s *DogStatsdSink) AddSampleWithLabels(key []string, val float32, labels []metrics.Label) {
flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags) flatKey, tags := s.getFlatkeyAndCombinedLabels(key, labels)
rate := 1.0 rate := 1.0
s.client.TimeInMilliseconds(flatKey, float64(val), tags, rate) s.client.TimeInMilliseconds(flatKey, float64(val), tags, rate)
} }
func (s *DogStatsdSink) getFlatkeyAndCombinedTags(key []string, tags []string) (flattenedKey string, combinedTags []string) { func (s *DogStatsdSink) getFlatkeyAndCombinedLabels(key []string, labels []metrics.Label) (string, []string) {
key, hostTags := s.parseKey(key) key, parsedLabels := s.parseKey(key)
flatKey := s.flattenKey(key) flatKey := s.flattenKey(key)
tags = append(tags, hostTags...) labels = append(labels, parsedLabels...)
var tags []string
for _, label := range labels {
label.Name = strings.Map(sanitize, label.Name)
label.Value = strings.Map(sanitize, label.Value)
if label.Value != "" {
tags = append(tags, fmt.Sprintf("%s:%s", label.Name, label.Value))
} else {
tags = append(tags, label.Name)
}
}
return flatKey, tags return flatKey, tags
} }

View File

@@ -1,6 +1,7 @@
package metrics package metrics
import ( import (
"bytes"
"fmt" "fmt"
"math" "math"
"net/url" "net/url"
@@ -39,7 +40,7 @@ type IntervalMetrics struct {
Interval time.Time Interval time.Time
// Gauges maps the key to the last set value // Gauges maps the key to the last set value
Gauges map[string]float32 Gauges map[string]GaugeValue
// Points maps the string to the list of emitted values // Points maps the string to the list of emitted values
// from EmitKey // from EmitKey
@@ -47,21 +48,21 @@ type IntervalMetrics struct {
// Counters maps the string key to a sum of the counter // Counters maps the string key to a sum of the counter
// values // values
Counters map[string]*AggregateSample Counters map[string]SampledValue
// Samples maps the key to an AggregateSample, // Samples maps the key to an AggregateSample,
// which has the rolled up view of a sample // which has the rolled up view of a sample
Samples map[string]*AggregateSample Samples map[string]SampledValue
} }
// NewIntervalMetrics creates a new IntervalMetrics for a given interval // NewIntervalMetrics creates a new IntervalMetrics for a given interval
func NewIntervalMetrics(intv time.Time) *IntervalMetrics { func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
return &IntervalMetrics{ return &IntervalMetrics{
Interval: intv, Interval: intv,
Gauges: make(map[string]float32), Gauges: make(map[string]GaugeValue),
Points: make(map[string][]float32), Points: make(map[string][]float32),
Counters: make(map[string]*AggregateSample), Counters: make(map[string]SampledValue),
Samples: make(map[string]*AggregateSample), Samples: make(map[string]SampledValue),
} }
} }
@@ -69,12 +70,12 @@ func NewIntervalMetrics(intv time.Time) *IntervalMetrics {
// about a sample // about a sample
type AggregateSample struct { type AggregateSample struct {
Count int // The count of emitted pairs Count int // The count of emitted pairs
Rate float64 // The count of emitted pairs per time unit (usually 1 second) Rate float64 `json:"-"` // The count of emitted pairs per time unit (usually 1 second)
Sum float64 // The sum of values Sum float64 // The sum of values
SumSq float64 // The sum of squared values SumSq float64 `json:"-"` // The sum of squared values
Min float64 // Minimum value Min float64 // Minimum value
Max float64 // Maximum value Max float64 // Maximum value
LastUpdated time.Time // When value was last updated LastUpdated time.Time `json:"-"` // When value was last updated
} }
// Computes a Stddev of the values // Computes a Stddev of the values
@@ -154,12 +155,16 @@ func NewInmemSink(interval, retain time.Duration) *InmemSink {
} }
func (i *InmemSink) SetGauge(key []string, val float32) { func (i *InmemSink) SetGauge(key []string, val float32) {
k := i.flattenKey(key) i.SetGaugeWithLabels(key, val, nil)
}
func (i *InmemSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval() intv := i.getInterval()
intv.Lock() intv.Lock()
defer intv.Unlock() defer intv.Unlock()
intv.Gauges[k] = val intv.Gauges[k] = GaugeValue{Name: name, Value: val, Labels: labels}
} }
func (i *InmemSink) EmitKey(key []string, val float32) { func (i *InmemSink) EmitKey(key []string, val float32) {
@@ -173,30 +178,46 @@ func (i *InmemSink) EmitKey(key []string, val float32) {
} }
func (i *InmemSink) IncrCounter(key []string, val float32) { func (i *InmemSink) IncrCounter(key []string, val float32) {
k := i.flattenKey(key) i.IncrCounterWithLabels(key, val, nil)
}
func (i *InmemSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval() intv := i.getInterval()
intv.Lock() intv.Lock()
defer intv.Unlock() defer intv.Unlock()
agg := intv.Counters[k] agg, ok := intv.Counters[k]
if agg == nil { if !ok {
agg = &AggregateSample{} agg = SampledValue{
Name: name,
AggregateSample: &AggregateSample{},
Labels: labels,
}
intv.Counters[k] = agg intv.Counters[k] = agg
} }
agg.Ingest(float64(val), i.rateDenom) agg.Ingest(float64(val), i.rateDenom)
} }
func (i *InmemSink) AddSample(key []string, val float32) { func (i *InmemSink) AddSample(key []string, val float32) {
k := i.flattenKey(key) i.AddSampleWithLabels(key, val, nil)
}
func (i *InmemSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
k, name := i.flattenKeyLabels(key, labels)
intv := i.getInterval() intv := i.getInterval()
intv.Lock() intv.Lock()
defer intv.Unlock() defer intv.Unlock()
agg := intv.Samples[k] agg, ok := intv.Samples[k]
if agg == nil { if !ok {
agg = &AggregateSample{} agg = SampledValue{
Name: name,
AggregateSample: &AggregateSample{},
Labels: labels,
}
intv.Samples[k] = agg intv.Samples[k] = agg
} }
agg.Ingest(float64(val), i.rateDenom) agg.Ingest(float64(val), i.rateDenom)
@@ -261,6 +282,38 @@ func (i *InmemSink) getInterval() *IntervalMetrics {
// Flattens the key for formatting, removes spaces // Flattens the key for formatting, removes spaces
func (i *InmemSink) flattenKey(parts []string) string { func (i *InmemSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".") buf := &bytes.Buffer{}
return strings.Replace(joined, " ", "_", -1) replacer := strings.NewReplacer(" ", "_")
if len(parts) > 0 {
replacer.WriteString(buf, parts[0])
}
for _, part := range parts[1:] {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, part)
}
return buf.String()
}
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSink) flattenKeyLabels(parts []string, labels []Label) (string, string) {
buf := &bytes.Buffer{}
replacer := strings.NewReplacer(" ", "_")
if len(parts) > 0 {
replacer.WriteString(buf, parts[0])
}
for _, part := range parts[1:] {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, part)
}
key := buf.String()
for _, label := range labels {
replacer.WriteString(buf, fmt.Sprintf(";%s=%s", label.Name, label.Value))
}
return buf.String(), key
} }

118
vendor/github.com/armon/go-metrics/inmem_endpoint.go generated vendored Normal file
View File

@@ -0,0 +1,118 @@
package metrics
import (
"fmt"
"net/http"
"sort"
"time"
)
// MetricsSummary holds a roll-up of metrics info for a given interval
type MetricsSummary struct {
Timestamp string
Gauges []GaugeValue
Points []PointValue
Counters []SampledValue
Samples []SampledValue
}
type GaugeValue struct {
Name string
Hash string `json:"-"`
Value float32
Labels []Label `json:"-"`
DisplayLabels map[string]string `json:"Labels"`
}
type PointValue struct {
Name string
Points []float32
}
type SampledValue struct {
Name string
Hash string `json:"-"`
*AggregateSample
Mean float64
Stddev float64
Labels []Label `json:"-"`
DisplayLabels map[string]string `json:"Labels"`
}
// DisplayMetrics returns a summary of the metrics from the most recent finished interval.
func (i *InmemSink) DisplayMetrics(resp http.ResponseWriter, req *http.Request) (interface{}, error) {
data := i.Data()
var interval *IntervalMetrics
n := len(data)
switch {
case n == 0:
return nil, fmt.Errorf("no metric intervals have been initialized yet")
case n == 1:
// Show the current interval if it's all we have
interval = i.intervals[0]
default:
// Show the most recent finished interval if we have one
interval = i.intervals[n-2]
}
summary := MetricsSummary{
Timestamp: interval.Interval.Round(time.Second).UTC().String(),
Gauges: make([]GaugeValue, 0, len(interval.Gauges)),
Points: make([]PointValue, 0, len(interval.Points)),
}
// Format and sort the output of each metric type, so it gets displayed in a
// deterministic order.
for name, points := range interval.Points {
summary.Points = append(summary.Points, PointValue{name, points})
}
sort.Slice(summary.Points, func(i, j int) bool {
return summary.Points[i].Name < summary.Points[j].Name
})
for hash, value := range interval.Gauges {
value.Hash = hash
value.DisplayLabels = make(map[string]string)
for _, label := range value.Labels {
value.DisplayLabels[label.Name] = label.Value
}
value.Labels = nil
summary.Gauges = append(summary.Gauges, value)
}
sort.Slice(summary.Gauges, func(i, j int) bool {
return summary.Gauges[i].Hash < summary.Gauges[j].Hash
})
summary.Counters = formatSamples(interval.Counters)
summary.Samples = formatSamples(interval.Samples)
return summary, nil
}
func formatSamples(source map[string]SampledValue) []SampledValue {
output := make([]SampledValue, 0, len(source))
for hash, sample := range source {
displayLabels := make(map[string]string)
for _, label := range sample.Labels {
displayLabels[label.Name] = label.Value
}
output = append(output, SampledValue{
Name: sample.Name,
Hash: hash,
AggregateSample: sample.AggregateSample,
Mean: sample.AggregateSample.Mean(),
Stddev: sample.AggregateSample.Stddev(),
DisplayLabels: displayLabels,
})
}
sort.Slice(output, func(i, j int) bool {
return output[i].Hash < output[j].Hash
})
return output
}

View File

@@ -6,6 +6,7 @@ import (
"io" "io"
"os" "os"
"os/signal" "os/signal"
"strings"
"sync" "sync"
"syscall" "syscall"
) )
@@ -75,22 +76,25 @@ func (i *InmemSignal) dumpStats() {
data := i.inm.Data() data := i.inm.Data()
// Skip the last period which is still being aggregated // Skip the last period which is still being aggregated
for i := 0; i < len(data)-1; i++ { for j := 0; j < len(data)-1; j++ {
intv := data[i] intv := data[j]
intv.RLock() intv.RLock()
for name, val := range intv.Gauges { for _, val := range intv.Gauges {
fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val) name := i.flattenLabels(val.Name, val.Labels)
fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val.Value)
} }
for name, vals := range intv.Points { for name, vals := range intv.Points {
for _, val := range vals { for _, val := range vals {
fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val)
} }
} }
for name, agg := range intv.Counters { for _, agg := range intv.Counters {
fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg) name := i.flattenLabels(agg.Name, agg.Labels)
fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
} }
for name, agg := range intv.Samples { for _, agg := range intv.Samples {
fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg) name := i.flattenLabels(agg.Name, agg.Labels)
fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg.AggregateSample)
} }
intv.RUnlock() intv.RUnlock()
} }
@@ -98,3 +102,16 @@ func (i *InmemSignal) dumpStats() {
// Write out the bytes // Write out the bytes
i.w.Write(buf.Bytes()) i.w.Write(buf.Bytes())
} }
// Flattens the key for formatting along with its labels, removes spaces
func (i *InmemSignal) flattenLabels(name string, labels []Label) string {
buf := bytes.NewBufferString(name)
replacer := strings.NewReplacer(" ", "_", ":", "_")
for _, label := range labels {
replacer.WriteString(buf, ".")
replacer.WriteString(buf, label.Value)
}
return buf.String()
}

View File

@@ -2,20 +2,43 @@ package metrics
import ( import (
"runtime" "runtime"
"strings"
"time" "time"
"github.com/hashicorp/go-immutable-radix"
) )
type Label struct {
Name string
Value string
}
func (m *Metrics) SetGauge(key []string, val float32) { func (m *Metrics) SetGauge(key []string, val float32) {
if m.HostName != "" && m.EnableHostname { m.SetGaugeWithLabels(key, val, nil)
key = insert(0, m.HostName, key) }
func (m *Metrics) SetGaugeWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" {
if m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
} else if m.EnableHostname {
key = insert(0, m.HostName, key)
}
} }
if m.EnableTypePrefix { if m.EnableTypePrefix {
key = insert(0, "gauge", key) key = insert(0, "gauge", key)
} }
if m.ServiceName != "" { if m.ServiceName != "" {
key = insert(0, m.ServiceName, key) if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
} }
m.sink.SetGauge(key, val) if !m.allowMetric(key) {
return
}
m.sink.SetGaugeWithLabels(key, val, labels)
} }
func (m *Metrics) EmitKey(key []string, val float32) { func (m *Metrics) EmitKey(key []string, val float32) {
@@ -25,40 +48,118 @@ func (m *Metrics) EmitKey(key []string, val float32) {
if m.ServiceName != "" { if m.ServiceName != "" {
key = insert(0, m.ServiceName, key) key = insert(0, m.ServiceName, key)
} }
if !m.allowMetric(key) {
return
}
m.sink.EmitKey(key, val) m.sink.EmitKey(key, val)
} }
func (m *Metrics) IncrCounter(key []string, val float32) { func (m *Metrics) IncrCounter(key []string, val float32) {
m.IncrCounterWithLabels(key, val, nil)
}
func (m *Metrics) IncrCounterWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix { if m.EnableTypePrefix {
key = insert(0, "counter", key) key = insert(0, "counter", key)
} }
if m.ServiceName != "" { if m.ServiceName != "" {
key = insert(0, m.ServiceName, key) if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
} }
m.sink.IncrCounter(key, val) if !m.allowMetric(key) {
return
}
m.sink.IncrCounterWithLabels(key, val, labels)
} }
func (m *Metrics) AddSample(key []string, val float32) { func (m *Metrics) AddSample(key []string, val float32) {
m.AddSampleWithLabels(key, val, nil)
}
func (m *Metrics) AddSampleWithLabels(key []string, val float32, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix { if m.EnableTypePrefix {
key = insert(0, "sample", key) key = insert(0, "sample", key)
} }
if m.ServiceName != "" { if m.ServiceName != "" {
key = insert(0, m.ServiceName, key) if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
} }
m.sink.AddSample(key, val) if !m.allowMetric(key) {
return
}
m.sink.AddSampleWithLabels(key, val, labels)
} }
func (m *Metrics) MeasureSince(key []string, start time.Time) { func (m *Metrics) MeasureSince(key []string, start time.Time) {
m.MeasureSinceWithLabels(key, start, nil)
}
func (m *Metrics) MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
if m.HostName != "" && m.EnableHostnameLabel {
labels = append(labels, Label{"host", m.HostName})
}
if m.EnableTypePrefix { if m.EnableTypePrefix {
key = insert(0, "timer", key) key = insert(0, "timer", key)
} }
if m.ServiceName != "" { if m.ServiceName != "" {
key = insert(0, m.ServiceName, key) if m.EnableServiceLabel {
labels = append(labels, Label{"service", m.ServiceName})
} else {
key = insert(0, m.ServiceName, key)
}
}
if !m.allowMetric(key) {
return
} }
now := time.Now() now := time.Now()
elapsed := now.Sub(start) elapsed := now.Sub(start)
msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity)
m.sink.AddSample(key, msec) m.sink.AddSampleWithLabels(key, msec, labels)
}
// UpdateFilter overwrites the existing filter with the given rules.
func (m *Metrics) UpdateFilter(allow, block []string) {
m.filterLock.Lock()
defer m.filterLock.Unlock()
m.AllowedPrefixes = allow
m.BlockedPrefixes = block
m.filter = iradix.New()
for _, prefix := range m.AllowedPrefixes {
m.filter, _, _ = m.filter.Insert([]byte(prefix), true)
}
for _, prefix := range m.BlockedPrefixes {
m.filter, _, _ = m.filter.Insert([]byte(prefix), false)
}
}
// Returns whether the metric should be allowed based on configured prefix filters
func (m *Metrics) allowMetric(key []string) bool {
m.filterLock.RLock()
defer m.filterLock.RUnlock()
if m.filter == nil || m.filter.Len() == 0 {
return m.Config.FilterDefault
}
_, allowed, ok := m.filter.Root().LongestPrefix([]byte(strings.Join(key, ".")))
if !ok {
return m.Config.FilterDefault
}
return allowed.(bool)
} }
// Periodically collects runtime stats to publish // Periodically collects runtime stats to publish

View File

@@ -10,31 +10,41 @@ import (
type MetricSink interface { type MetricSink interface {
// A Gauge should retain the last value it is set to // A Gauge should retain the last value it is set to
SetGauge(key []string, val float32) SetGauge(key []string, val float32)
SetGaugeWithLabels(key []string, val float32, labels []Label)
// Should emit a Key/Value pair for each call // Should emit a Key/Value pair for each call
EmitKey(key []string, val float32) EmitKey(key []string, val float32)
// Counters should accumulate values // Counters should accumulate values
IncrCounter(key []string, val float32) IncrCounter(key []string, val float32)
IncrCounterWithLabels(key []string, val float32, labels []Label)
// Samples are for timing information, where quantiles are used // Samples are for timing information, where quantiles are used
AddSample(key []string, val float32) AddSample(key []string, val float32)
AddSampleWithLabels(key []string, val float32, labels []Label)
} }
// BlackholeSink is used to just blackhole messages // BlackholeSink is used to just blackhole messages
type BlackholeSink struct{} type BlackholeSink struct{}
func (*BlackholeSink) SetGauge(key []string, val float32) {} func (*BlackholeSink) SetGauge(key []string, val float32) {}
func (*BlackholeSink) EmitKey(key []string, val float32) {} func (*BlackholeSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {}
func (*BlackholeSink) IncrCounter(key []string, val float32) {} func (*BlackholeSink) EmitKey(key []string, val float32) {}
func (*BlackholeSink) AddSample(key []string, val float32) {} func (*BlackholeSink) IncrCounter(key []string, val float32) {}
func (*BlackholeSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {}
func (*BlackholeSink) AddSample(key []string, val float32) {}
func (*BlackholeSink) AddSampleWithLabels(key []string, val float32, labels []Label) {}
// FanoutSink is used to sink to fanout values to multiple sinks // FanoutSink is used to sink to fanout values to multiple sinks
type FanoutSink []MetricSink type FanoutSink []MetricSink
func (fh FanoutSink) SetGauge(key []string, val float32) { func (fh FanoutSink) SetGauge(key []string, val float32) {
fh.SetGaugeWithLabels(key, val, nil)
}
func (fh FanoutSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh { for _, s := range fh {
s.SetGauge(key, val) s.SetGaugeWithLabels(key, val, labels)
} }
} }
@@ -45,14 +55,22 @@ func (fh FanoutSink) EmitKey(key []string, val float32) {
} }
func (fh FanoutSink) IncrCounter(key []string, val float32) { func (fh FanoutSink) IncrCounter(key []string, val float32) {
fh.IncrCounterWithLabels(key, val, nil)
}
func (fh FanoutSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh { for _, s := range fh {
s.IncrCounter(key, val) s.IncrCounterWithLabels(key, val, labels)
} }
} }
func (fh FanoutSink) AddSample(key []string, val float32) { func (fh FanoutSink) AddSample(key []string, val float32) {
fh.AddSampleWithLabels(key, val, nil)
}
func (fh FanoutSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
for _, s := range fh { for _, s := range fh {
s.AddSample(key, val) s.AddSampleWithLabels(key, val, labels)
} }
} }

View File

@@ -2,8 +2,11 @@ package metrics
import ( import (
"os" "os"
"sync"
"sync/atomic" "sync/atomic"
"time" "time"
"github.com/hashicorp/go-immutable-radix"
) )
// Config is used to configure metrics settings // Config is used to configure metrics settings
@@ -11,18 +14,26 @@ type Config struct {
ServiceName string // Prefixed with keys to seperate services ServiceName string // Prefixed with keys to seperate services
HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname
EnableHostname bool // Enable prefixing gauge values with hostname EnableHostname bool // Enable prefixing gauge values with hostname
EnableHostnameLabel bool // Enable adding hostname to labels
EnableServiceLabel bool // Enable adding service to labels
EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory)
EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer")
TimerGranularity time.Duration // Granularity of timers. TimerGranularity time.Duration // Granularity of timers.
ProfileInterval time.Duration // Interval to profile runtime metrics ProfileInterval time.Duration // Interval to profile runtime metrics
AllowedPrefixes []string // A list of metric prefixes to allow, with '.' as the separator
BlockedPrefixes []string // A list of metric prefixes to block, with '.' as the separator
FilterDefault bool // Whether to allow metrics by default
} }
// Metrics represents an instance of a metrics sink that can // Metrics represents an instance of a metrics sink that can
// be used to emit // be used to emit
type Metrics struct { type Metrics struct {
Config Config
lastNumGC uint32 lastNumGC uint32
sink MetricSink sink MetricSink
filter *iradix.Tree
filterLock sync.RWMutex
} }
// Shared global metrics instance // Shared global metrics instance
@@ -43,6 +54,7 @@ func DefaultConfig(serviceName string) *Config {
EnableTypePrefix: false, // Disable type prefix EnableTypePrefix: false, // Disable type prefix
TimerGranularity: time.Millisecond, // Timers are in milliseconds TimerGranularity: time.Millisecond, // Timers are in milliseconds
ProfileInterval: time.Second, // Poll runtime every second ProfileInterval: time.Second, // Poll runtime every second
FilterDefault: true, // Don't filter metrics by default
} }
// Try to get the hostname // Try to get the hostname
@@ -56,6 +68,7 @@ func New(conf *Config, sink MetricSink) (*Metrics, error) {
met := &Metrics{} met := &Metrics{}
met.Config = *conf met.Config = *conf
met.sink = sink met.sink = sink
met.UpdateFilter(conf.AllowedPrefixes, conf.BlockedPrefixes)
// Start the runtime collector // Start the runtime collector
if conf.EnableRuntimeMetrics { if conf.EnableRuntimeMetrics {
@@ -79,6 +92,10 @@ func SetGauge(key []string, val float32) {
globalMetrics.Load().(*Metrics).SetGauge(key, val) globalMetrics.Load().(*Metrics).SetGauge(key, val)
} }
func SetGaugeWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).SetGaugeWithLabels(key, val, labels)
}
func EmitKey(key []string, val float32) { func EmitKey(key []string, val float32) {
globalMetrics.Load().(*Metrics).EmitKey(key, val) globalMetrics.Load().(*Metrics).EmitKey(key, val)
} }
@@ -87,10 +104,26 @@ func IncrCounter(key []string, val float32) {
globalMetrics.Load().(*Metrics).IncrCounter(key, val) globalMetrics.Load().(*Metrics).IncrCounter(key, val)
} }
func IncrCounterWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).IncrCounterWithLabels(key, val, labels)
}
func AddSample(key []string, val float32) { func AddSample(key []string, val float32) {
globalMetrics.Load().(*Metrics).AddSample(key, val) globalMetrics.Load().(*Metrics).AddSample(key, val)
} }
func AddSampleWithLabels(key []string, val float32, labels []Label) {
globalMetrics.Load().(*Metrics).AddSampleWithLabels(key, val, labels)
}
func MeasureSince(key []string, start time.Time) { func MeasureSince(key []string, start time.Time) {
globalMetrics.Load().(*Metrics).MeasureSince(key, start) globalMetrics.Load().(*Metrics).MeasureSince(key, start)
} }
func MeasureSinceWithLabels(key []string, start time.Time, labels []Label) {
globalMetrics.Load().(*Metrics).MeasureSinceWithLabels(key, start, labels)
}
func UpdateFilter(allow, block []string) {
globalMetrics.Load().(*Metrics).UpdateFilter(allow, block)
}

View File

@@ -50,6 +50,11 @@ func (s *StatsdSink) SetGauge(key []string, val float32) {
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
} }
func (s *StatsdSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsdSink) EmitKey(key []string, val float32) { func (s *StatsdSink) EmitKey(key []string, val float32) {
flatKey := s.flattenKey(key) flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
@@ -60,11 +65,21 @@ func (s *StatsdSink) IncrCounter(key []string, val float32) {
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
} }
func (s *StatsdSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsdSink) AddSample(key []string, val float32) { func (s *StatsdSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key) flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
} }
func (s *StatsdSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
// Flattens the key for formatting, removes spaces // Flattens the key for formatting, removes spaces
func (s *StatsdSink) flattenKey(parts []string) string { func (s *StatsdSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".") joined := strings.Join(parts, ".")
@@ -80,6 +95,14 @@ func (s *StatsdSink) flattenKey(parts []string) string {
}, joined) }, joined)
} }
// Flattens the key along with labels for formatting, removes spaces
func (s *StatsdSink) flattenKeyLabels(parts []string, labels []Label) string {
for _, label := range labels {
parts = append(parts, label.Value)
}
return s.flattenKey(parts)
}
// Does a non-blocking push to the metrics queue // Does a non-blocking push to the metrics queue
func (s *StatsdSink) pushMetric(m string) { func (s *StatsdSink) pushMetric(m string) {
select { select {

View File

@@ -50,6 +50,11 @@ func (s *StatsiteSink) SetGauge(key []string, val float32) {
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
} }
func (s *StatsiteSink) SetGaugeWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val))
}
func (s *StatsiteSink) EmitKey(key []string, val float32) { func (s *StatsiteSink) EmitKey(key []string, val float32) {
flatKey := s.flattenKey(key) flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val))
@@ -60,11 +65,21 @@ func (s *StatsiteSink) IncrCounter(key []string, val float32) {
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
} }
func (s *StatsiteSink) IncrCounterWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val))
}
func (s *StatsiteSink) AddSample(key []string, val float32) { func (s *StatsiteSink) AddSample(key []string, val float32) {
flatKey := s.flattenKey(key) flatKey := s.flattenKey(key)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
} }
func (s *StatsiteSink) AddSampleWithLabels(key []string, val float32, labels []Label) {
flatKey := s.flattenKeyLabels(key, labels)
s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val))
}
// Flattens the key for formatting, removes spaces // Flattens the key for formatting, removes spaces
func (s *StatsiteSink) flattenKey(parts []string) string { func (s *StatsiteSink) flattenKey(parts []string) string {
joined := strings.Join(parts, ".") joined := strings.Join(parts, ".")
@@ -80,6 +95,14 @@ func (s *StatsiteSink) flattenKey(parts []string) string {
}, joined) }, joined)
} }
// Flattens the key along with labels for formatting, removes spaces
func (s *StatsiteSink) flattenKeyLabels(parts []string, labels []Label) string {
for _, label := range labels {
parts = append(parts, label.Value)
}
return s.flattenKey(parts)
}
// Does a non-blocking push to the metrics queue // Does a non-blocking push to the metrics queue
func (s *StatsiteSink) pushMetric(m string) { func (s *StatsiteSink) pushMetric(m string) {
select { select {

View File

@@ -292,6 +292,53 @@ DELETE:
return leaf.val, true return leaf.val, true
} }
// DeletePrefix is used to delete the subtree under a prefix
// Returns how many nodes were deleted
// Use this to delete large subtrees efficiently
func (t *Tree) DeletePrefix(s string) int {
return t.deletePrefix(nil, t.root, s)
}
// delete does a recursive deletion
func (t *Tree) deletePrefix(parent, n *node, prefix string) int {
// Check for key exhaustion
if len(prefix) == 0 {
// Remove the leaf node
subTreeSize := 0
//recursively walk from all edges of the node to be deleted
recursiveWalk(n, func(s string, v interface{}) bool {
subTreeSize++
return false
})
if n.isLeaf() {
n.leaf = nil
}
n.edges = nil // deletes the entire subtree
// Check if we should merge the parent's other child
if parent != nil && parent != t.root && len(parent.edges) == 1 && !parent.isLeaf() {
parent.mergeChild()
}
t.size -= subTreeSize
return subTreeSize
}
// Look for an edge
label := prefix[0]
child := n.getEdge(label)
if child == nil || (!strings.HasPrefix(child.prefix, prefix) && !strings.HasPrefix(prefix, child.prefix)) {
return 0
}
// Consume the search prefix
if len(child.prefix) > len(prefix) {
prefix = prefix[len(prefix):]
} else {
prefix = prefix[len(child.prefix):]
}
return t.deletePrefix(n, child, prefix)
}
func (n *node) mergeChild() { func (n *node) mergeChild() {
e := n.edges[0] e := n.edges[0]
child := e.node child := e.node

View File

@@ -134,6 +134,8 @@ func IsISBN10(str string) bool
func IsISBN13(str string) bool func IsISBN13(str string) bool
func IsISO3166Alpha2(str string) bool func IsISO3166Alpha2(str string) bool
func IsISO3166Alpha3(str string) bool func IsISO3166Alpha3(str string) bool
func IsISO693Alpha2(str string) bool
func IsISO693Alpha3b(str string) bool
func IsISO4217(str string) bool func IsISO4217(str string) bool
func IsIn(str string, params ...string) bool func IsIn(str string, params ...string) bool
func IsInt(str string) bool func IsInt(str string) bool

View File

@@ -416,3 +416,198 @@ var ISO4217List = []string{
"YER", "YER",
"ZAR", "ZMW", "ZWL", "ZAR", "ZMW", "ZWL",
} }
// ISO693Entry stores ISO language codes
type ISO693Entry struct {
Alpha3bCode string
Alpha2Code string
English string
}
//ISO693List based on http://data.okfn.org/data/core/language-codes/r/language-codes-3b2.json
var ISO693List = []ISO693Entry{
{Alpha3bCode: "aar", Alpha2Code: "aa", English: "Afar"},
{Alpha3bCode: "abk", Alpha2Code: "ab", English: "Abkhazian"},
{Alpha3bCode: "afr", Alpha2Code: "af", English: "Afrikaans"},
{Alpha3bCode: "aka", Alpha2Code: "ak", English: "Akan"},
{Alpha3bCode: "alb", Alpha2Code: "sq", English: "Albanian"},
{Alpha3bCode: "amh", Alpha2Code: "am", English: "Amharic"},
{Alpha3bCode: "ara", Alpha2Code: "ar", English: "Arabic"},
{Alpha3bCode: "arg", Alpha2Code: "an", English: "Aragonese"},
{Alpha3bCode: "arm", Alpha2Code: "hy", English: "Armenian"},
{Alpha3bCode: "asm", Alpha2Code: "as", English: "Assamese"},
{Alpha3bCode: "ava", Alpha2Code: "av", English: "Avaric"},
{Alpha3bCode: "ave", Alpha2Code: "ae", English: "Avestan"},
{Alpha3bCode: "aym", Alpha2Code: "ay", English: "Aymara"},
{Alpha3bCode: "aze", Alpha2Code: "az", English: "Azerbaijani"},
{Alpha3bCode: "bak", Alpha2Code: "ba", English: "Bashkir"},
{Alpha3bCode: "bam", Alpha2Code: "bm", English: "Bambara"},
{Alpha3bCode: "baq", Alpha2Code: "eu", English: "Basque"},
{Alpha3bCode: "bel", Alpha2Code: "be", English: "Belarusian"},
{Alpha3bCode: "ben", Alpha2Code: "bn", English: "Bengali"},
{Alpha3bCode: "bih", Alpha2Code: "bh", English: "Bihari languages"},
{Alpha3bCode: "bis", Alpha2Code: "bi", English: "Bislama"},
{Alpha3bCode: "bos", Alpha2Code: "bs", English: "Bosnian"},
{Alpha3bCode: "bre", Alpha2Code: "br", English: "Breton"},
{Alpha3bCode: "bul", Alpha2Code: "bg", English: "Bulgarian"},
{Alpha3bCode: "bur", Alpha2Code: "my", English: "Burmese"},
{Alpha3bCode: "cat", Alpha2Code: "ca", English: "Catalan; Valencian"},
{Alpha3bCode: "cha", Alpha2Code: "ch", English: "Chamorro"},
{Alpha3bCode: "che", Alpha2Code: "ce", English: "Chechen"},
{Alpha3bCode: "chi", Alpha2Code: "zh", English: "Chinese"},
{Alpha3bCode: "chu", Alpha2Code: "cu", English: "Church Slavic; Old Slavonic; Church Slavonic; Old Bulgarian; Old Church Slavonic"},
{Alpha3bCode: "chv", Alpha2Code: "cv", English: "Chuvash"},
{Alpha3bCode: "cor", Alpha2Code: "kw", English: "Cornish"},
{Alpha3bCode: "cos", Alpha2Code: "co", English: "Corsican"},
{Alpha3bCode: "cre", Alpha2Code: "cr", English: "Cree"},
{Alpha3bCode: "cze", Alpha2Code: "cs", English: "Czech"},
{Alpha3bCode: "dan", Alpha2Code: "da", English: "Danish"},
{Alpha3bCode: "div", Alpha2Code: "dv", English: "Divehi; Dhivehi; Maldivian"},
{Alpha3bCode: "dut", Alpha2Code: "nl", English: "Dutch; Flemish"},
{Alpha3bCode: "dzo", Alpha2Code: "dz", English: "Dzongkha"},
{Alpha3bCode: "eng", Alpha2Code: "en", English: "English"},
{Alpha3bCode: "epo", Alpha2Code: "eo", English: "Esperanto"},
{Alpha3bCode: "est", Alpha2Code: "et", English: "Estonian"},
{Alpha3bCode: "ewe", Alpha2Code: "ee", English: "Ewe"},
{Alpha3bCode: "fao", Alpha2Code: "fo", English: "Faroese"},
{Alpha3bCode: "fij", Alpha2Code: "fj", English: "Fijian"},
{Alpha3bCode: "fin", Alpha2Code: "fi", English: "Finnish"},
{Alpha3bCode: "fre", Alpha2Code: "fr", English: "French"},
{Alpha3bCode: "fry", Alpha2Code: "fy", English: "Western Frisian"},
{Alpha3bCode: "ful", Alpha2Code: "ff", English: "Fulah"},
{Alpha3bCode: "geo", Alpha2Code: "ka", English: "Georgian"},
{Alpha3bCode: "ger", Alpha2Code: "de", English: "German"},
{Alpha3bCode: "gla", Alpha2Code: "gd", English: "Gaelic; Scottish Gaelic"},
{Alpha3bCode: "gle", Alpha2Code: "ga", English: "Irish"},
{Alpha3bCode: "glg", Alpha2Code: "gl", English: "Galician"},
{Alpha3bCode: "glv", Alpha2Code: "gv", English: "Manx"},
{Alpha3bCode: "gre", Alpha2Code: "el", English: "Greek, Modern (1453-)"},
{Alpha3bCode: "grn", Alpha2Code: "gn", English: "Guarani"},
{Alpha3bCode: "guj", Alpha2Code: "gu", English: "Gujarati"},
{Alpha3bCode: "hat", Alpha2Code: "ht", English: "Haitian; Haitian Creole"},
{Alpha3bCode: "hau", Alpha2Code: "ha", English: "Hausa"},
{Alpha3bCode: "heb", Alpha2Code: "he", English: "Hebrew"},
{Alpha3bCode: "her", Alpha2Code: "hz", English: "Herero"},
{Alpha3bCode: "hin", Alpha2Code: "hi", English: "Hindi"},
{Alpha3bCode: "hmo", Alpha2Code: "ho", English: "Hiri Motu"},
{Alpha3bCode: "hrv", Alpha2Code: "hr", English: "Croatian"},
{Alpha3bCode: "hun", Alpha2Code: "hu", English: "Hungarian"},
{Alpha3bCode: "ibo", Alpha2Code: "ig", English: "Igbo"},
{Alpha3bCode: "ice", Alpha2Code: "is", English: "Icelandic"},
{Alpha3bCode: "ido", Alpha2Code: "io", English: "Ido"},
{Alpha3bCode: "iii", Alpha2Code: "ii", English: "Sichuan Yi; Nuosu"},
{Alpha3bCode: "iku", Alpha2Code: "iu", English: "Inuktitut"},
{Alpha3bCode: "ile", Alpha2Code: "ie", English: "Interlingue; Occidental"},
{Alpha3bCode: "ina", Alpha2Code: "ia", English: "Interlingua (International Auxiliary Language Association)"},
{Alpha3bCode: "ind", Alpha2Code: "id", English: "Indonesian"},
{Alpha3bCode: "ipk", Alpha2Code: "ik", English: "Inupiaq"},
{Alpha3bCode: "ita", Alpha2Code: "it", English: "Italian"},
{Alpha3bCode: "jav", Alpha2Code: "jv", English: "Javanese"},
{Alpha3bCode: "jpn", Alpha2Code: "ja", English: "Japanese"},
{Alpha3bCode: "kal", Alpha2Code: "kl", English: "Kalaallisut; Greenlandic"},
{Alpha3bCode: "kan", Alpha2Code: "kn", English: "Kannada"},
{Alpha3bCode: "kas", Alpha2Code: "ks", English: "Kashmiri"},
{Alpha3bCode: "kau", Alpha2Code: "kr", English: "Kanuri"},
{Alpha3bCode: "kaz", Alpha2Code: "kk", English: "Kazakh"},
{Alpha3bCode: "khm", Alpha2Code: "km", English: "Central Khmer"},
{Alpha3bCode: "kik", Alpha2Code: "ki", English: "Kikuyu; Gikuyu"},
{Alpha3bCode: "kin", Alpha2Code: "rw", English: "Kinyarwanda"},
{Alpha3bCode: "kir", Alpha2Code: "ky", English: "Kirghiz; Kyrgyz"},
{Alpha3bCode: "kom", Alpha2Code: "kv", English: "Komi"},
{Alpha3bCode: "kon", Alpha2Code: "kg", English: "Kongo"},
{Alpha3bCode: "kor", Alpha2Code: "ko", English: "Korean"},
{Alpha3bCode: "kua", Alpha2Code: "kj", English: "Kuanyama; Kwanyama"},
{Alpha3bCode: "kur", Alpha2Code: "ku", English: "Kurdish"},
{Alpha3bCode: "lao", Alpha2Code: "lo", English: "Lao"},
{Alpha3bCode: "lat", Alpha2Code: "la", English: "Latin"},
{Alpha3bCode: "lav", Alpha2Code: "lv", English: "Latvian"},
{Alpha3bCode: "lim", Alpha2Code: "li", English: "Limburgan; Limburger; Limburgish"},
{Alpha3bCode: "lin", Alpha2Code: "ln", English: "Lingala"},
{Alpha3bCode: "lit", Alpha2Code: "lt", English: "Lithuanian"},
{Alpha3bCode: "ltz", Alpha2Code: "lb", English: "Luxembourgish; Letzeburgesch"},
{Alpha3bCode: "lub", Alpha2Code: "lu", English: "Luba-Katanga"},
{Alpha3bCode: "lug", Alpha2Code: "lg", English: "Ganda"},
{Alpha3bCode: "mac", Alpha2Code: "mk", English: "Macedonian"},
{Alpha3bCode: "mah", Alpha2Code: "mh", English: "Marshallese"},
{Alpha3bCode: "mal", Alpha2Code: "ml", English: "Malayalam"},
{Alpha3bCode: "mao", Alpha2Code: "mi", English: "Maori"},
{Alpha3bCode: "mar", Alpha2Code: "mr", English: "Marathi"},
{Alpha3bCode: "may", Alpha2Code: "ms", English: "Malay"},
{Alpha3bCode: "mlg", Alpha2Code: "mg", English: "Malagasy"},
{Alpha3bCode: "mlt", Alpha2Code: "mt", English: "Maltese"},
{Alpha3bCode: "mon", Alpha2Code: "mn", English: "Mongolian"},
{Alpha3bCode: "nau", Alpha2Code: "na", English: "Nauru"},
{Alpha3bCode: "nav", Alpha2Code: "nv", English: "Navajo; Navaho"},
{Alpha3bCode: "nbl", Alpha2Code: "nr", English: "Ndebele, South; South Ndebele"},
{Alpha3bCode: "nde", Alpha2Code: "nd", English: "Ndebele, North; North Ndebele"},
{Alpha3bCode: "ndo", Alpha2Code: "ng", English: "Ndonga"},
{Alpha3bCode: "nep", Alpha2Code: "ne", English: "Nepali"},
{Alpha3bCode: "nno", Alpha2Code: "nn", English: "Norwegian Nynorsk; Nynorsk, Norwegian"},
{Alpha3bCode: "nob", Alpha2Code: "nb", English: "Bokmål, Norwegian; Norwegian Bokmål"},
{Alpha3bCode: "nor", Alpha2Code: "no", English: "Norwegian"},
{Alpha3bCode: "nya", Alpha2Code: "ny", English: "Chichewa; Chewa; Nyanja"},
{Alpha3bCode: "oci", Alpha2Code: "oc", English: "Occitan (post 1500); Provençal"},
{Alpha3bCode: "oji", Alpha2Code: "oj", English: "Ojibwa"},
{Alpha3bCode: "ori", Alpha2Code: "or", English: "Oriya"},
{Alpha3bCode: "orm", Alpha2Code: "om", English: "Oromo"},
{Alpha3bCode: "oss", Alpha2Code: "os", English: "Ossetian; Ossetic"},
{Alpha3bCode: "pan", Alpha2Code: "pa", English: "Panjabi; Punjabi"},
{Alpha3bCode: "per", Alpha2Code: "fa", English: "Persian"},
{Alpha3bCode: "pli", Alpha2Code: "pi", English: "Pali"},
{Alpha3bCode: "pol", Alpha2Code: "pl", English: "Polish"},
{Alpha3bCode: "por", Alpha2Code: "pt", English: "Portuguese"},
{Alpha3bCode: "pus", Alpha2Code: "ps", English: "Pushto; Pashto"},
{Alpha3bCode: "que", Alpha2Code: "qu", English: "Quechua"},
{Alpha3bCode: "roh", Alpha2Code: "rm", English: "Romansh"},
{Alpha3bCode: "rum", Alpha2Code: "ro", English: "Romanian; Moldavian; Moldovan"},
{Alpha3bCode: "run", Alpha2Code: "rn", English: "Rundi"},
{Alpha3bCode: "rus", Alpha2Code: "ru", English: "Russian"},
{Alpha3bCode: "sag", Alpha2Code: "sg", English: "Sango"},
{Alpha3bCode: "san", Alpha2Code: "sa", English: "Sanskrit"},
{Alpha3bCode: "sin", Alpha2Code: "si", English: "Sinhala; Sinhalese"},
{Alpha3bCode: "slo", Alpha2Code: "sk", English: "Slovak"},
{Alpha3bCode: "slv", Alpha2Code: "sl", English: "Slovenian"},
{Alpha3bCode: "sme", Alpha2Code: "se", English: "Northern Sami"},
{Alpha3bCode: "smo", Alpha2Code: "sm", English: "Samoan"},
{Alpha3bCode: "sna", Alpha2Code: "sn", English: "Shona"},
{Alpha3bCode: "snd", Alpha2Code: "sd", English: "Sindhi"},
{Alpha3bCode: "som", Alpha2Code: "so", English: "Somali"},
{Alpha3bCode: "sot", Alpha2Code: "st", English: "Sotho, Southern"},
{Alpha3bCode: "spa", Alpha2Code: "es", English: "Spanish; Castilian"},
{Alpha3bCode: "srd", Alpha2Code: "sc", English: "Sardinian"},
{Alpha3bCode: "srp", Alpha2Code: "sr", English: "Serbian"},
{Alpha3bCode: "ssw", Alpha2Code: "ss", English: "Swati"},
{Alpha3bCode: "sun", Alpha2Code: "su", English: "Sundanese"},
{Alpha3bCode: "swa", Alpha2Code: "sw", English: "Swahili"},
{Alpha3bCode: "swe", Alpha2Code: "sv", English: "Swedish"},
{Alpha3bCode: "tah", Alpha2Code: "ty", English: "Tahitian"},
{Alpha3bCode: "tam", Alpha2Code: "ta", English: "Tamil"},
{Alpha3bCode: "tat", Alpha2Code: "tt", English: "Tatar"},
{Alpha3bCode: "tel", Alpha2Code: "te", English: "Telugu"},
{Alpha3bCode: "tgk", Alpha2Code: "tg", English: "Tajik"},
{Alpha3bCode: "tgl", Alpha2Code: "tl", English: "Tagalog"},
{Alpha3bCode: "tha", Alpha2Code: "th", English: "Thai"},
{Alpha3bCode: "tib", Alpha2Code: "bo", English: "Tibetan"},
{Alpha3bCode: "tir", Alpha2Code: "ti", English: "Tigrinya"},
{Alpha3bCode: "ton", Alpha2Code: "to", English: "Tonga (Tonga Islands)"},
{Alpha3bCode: "tsn", Alpha2Code: "tn", English: "Tswana"},
{Alpha3bCode: "tso", Alpha2Code: "ts", English: "Tsonga"},
{Alpha3bCode: "tuk", Alpha2Code: "tk", English: "Turkmen"},
{Alpha3bCode: "tur", Alpha2Code: "tr", English: "Turkish"},
{Alpha3bCode: "twi", Alpha2Code: "tw", English: "Twi"},
{Alpha3bCode: "uig", Alpha2Code: "ug", English: "Uighur; Uyghur"},
{Alpha3bCode: "ukr", Alpha2Code: "uk", English: "Ukrainian"},
{Alpha3bCode: "urd", Alpha2Code: "ur", English: "Urdu"},
{Alpha3bCode: "uzb", Alpha2Code: "uz", English: "Uzbek"},
{Alpha3bCode: "ven", Alpha2Code: "ve", English: "Venda"},
{Alpha3bCode: "vie", Alpha2Code: "vi", English: "Vietnamese"},
{Alpha3bCode: "vol", Alpha2Code: "vo", English: "Volapük"},
{Alpha3bCode: "wel", Alpha2Code: "cy", English: "Welsh"},
{Alpha3bCode: "wln", Alpha2Code: "wa", English: "Walloon"},
{Alpha3bCode: "wol", Alpha2Code: "wo", English: "Wolof"},
{Alpha3bCode: "xho", Alpha2Code: "xh", English: "Xhosa"},
{Alpha3bCode: "yid", Alpha2Code: "yi", English: "Yiddish"},
{Alpha3bCode: "yor", Alpha2Code: "yo", English: "Yoruba"},
{Alpha3bCode: "zha", Alpha2Code: "za", English: "Zhuang; Chuang"},
{Alpha3bCode: "zul", Alpha2Code: "zu", English: "Zulu"},
}

View File

@@ -457,6 +457,26 @@ func IsISO3166Alpha3(str string) bool {
return false return false
} }
// IsISO693Alpha2 checks if a string is valid two-letter language code
func IsISO693Alpha2(str string) bool {
for _, entry := range ISO693List {
if str == entry.Alpha2Code {
return true
}
}
return false
}
// IsISO693Alpha3b checks if a string is valid three-letter language code
func IsISO693Alpha3b(str string) bool {
for _, entry := range ISO693List {
if str == entry.Alpha3bCode {
return true
}
}
return false
}
// IsDNSName will validate the given string as a DNS name // IsDNSName will validate the given string as a DNS name
func IsDNSName(str string) bool { func IsDNSName(str string) bool {
if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 { if str == "" || len(strings.Replace(str, ".", "", -1)) > 255 {
@@ -575,7 +595,7 @@ func ValidateStruct(s interface{}) (bool, error) {
continue // Private field continue // Private field
} }
structResult := true structResult := true
if valueField.Kind() == reflect.Struct { if valueField.Kind() == reflect.Struct && typeField.Tag.Get(tagName) != "-" {
var err error var err error
structResult, err = ValidateStruct(valueField.Interface()) structResult, err = ValidateStruct(valueField.Interface())
if err != nil { if err != nil {

View File

@@ -4,9 +4,9 @@ package aws
import "time" import "time"
// An emptyCtx is a copy of the the Go 1.7 context.emptyCtx type. This // An emptyCtx is a copy of the Go 1.7 context.emptyCtx type. This is copied to
// is copied to provide a 1.6 and 1.5 safe version of context that is compatible // provide a 1.6 and 1.5 safe version of context that is compatible with Go
// with Go 1.7's Context. // 1.7's Context.
// //
// An emptyCtx is never canceled, has no values, and has no deadline. It is not // An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses. // struct{}, since vars of this type must have distinct addresses.

View File

@@ -311,6 +311,24 @@ func TimeValue(v *time.Time) time.Time {
return time.Time{} return time.Time{}
} }
// SecondsTimeValue converts an int64 pointer to a time.Time value
// representing seconds since Epoch or time.Time{} if the pointer is nil.
func SecondsTimeValue(v *int64) time.Time {
if v != nil {
return time.Unix((*v / 1000), 0)
}
return time.Time{}
}
// MillisecondsTimeValue converts an int64 pointer to a time.Time value
// representing milliseconds sinch Epoch or time.Time{} if the pointer is nil.
func MillisecondsTimeValue(v *int64) time.Time {
if v != nil {
return time.Unix(0, (*v * 1000000))
}
return time.Time{}
}
// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". // TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC".
// The result is undefined if the Unix time cannot be represented by an int64. // The result is undefined if the Unix time cannot be represented by an int64.
// Which includes calling TimeUnixMilli on a zero Time is undefined. // Which includes calling TimeUnixMilli on a zero Time is undefined.

View File

@@ -55,6 +55,7 @@ const (
CloudformationServiceID = "cloudformation" // Cloudformation. CloudformationServiceID = "cloudformation" // Cloudformation.
CloudfrontServiceID = "cloudfront" // Cloudfront. CloudfrontServiceID = "cloudfront" // Cloudfront.
CloudhsmServiceID = "cloudhsm" // Cloudhsm. CloudhsmServiceID = "cloudhsm" // Cloudhsm.
Cloudhsmv2ServiceID = "cloudhsmv2" // Cloudhsmv2.
CloudsearchServiceID = "cloudsearch" // Cloudsearch. CloudsearchServiceID = "cloudsearch" // Cloudsearch.
CloudtrailServiceID = "cloudtrail" // Cloudtrail. CloudtrailServiceID = "cloudtrail" // Cloudtrail.
CodebuildServiceID = "codebuild" // Codebuild. CodebuildServiceID = "codebuild" // Codebuild.
@@ -91,6 +92,7 @@ const (
FirehoseServiceID = "firehose" // Firehose. FirehoseServiceID = "firehose" // Firehose.
GameliftServiceID = "gamelift" // Gamelift. GameliftServiceID = "gamelift" // Gamelift.
GlacierServiceID = "glacier" // Glacier. GlacierServiceID = "glacier" // Glacier.
GlueServiceID = "glue" // Glue.
GreengrassServiceID = "greengrass" // Greengrass. GreengrassServiceID = "greengrass" // Greengrass.
HealthServiceID = "health" // Health. HealthServiceID = "health" // Health.
IamServiceID = "iam" // Iam. IamServiceID = "iam" // Iam.
@@ -106,6 +108,7 @@ const (
MachinelearningServiceID = "machinelearning" // Machinelearning. MachinelearningServiceID = "machinelearning" // Machinelearning.
MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics. MarketplacecommerceanalyticsServiceID = "marketplacecommerceanalytics" // Marketplacecommerceanalytics.
MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace. MeteringMarketplaceServiceID = "metering.marketplace" // MeteringMarketplace.
MghServiceID = "mgh" // Mgh.
MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics. MobileanalyticsServiceID = "mobileanalytics" // Mobileanalytics.
ModelsLexServiceID = "models.lex" // ModelsLex. ModelsLexServiceID = "models.lex" // ModelsLex.
MonitoringServiceID = "monitoring" // Monitoring. MonitoringServiceID = "monitoring" // Monitoring.
@@ -347,6 +350,7 @@ var awsPartition = partition{
Endpoints: endpoints{ Endpoints: endpoints{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
@@ -429,6 +433,15 @@ var awsPartition = partition{
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
"cloudhsmv2": service{
Endpoints: endpoints{
"eu-west-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{},
},
},
"cloudsearch": service{ "cloudsearch": service{
Endpoints: endpoints{ Endpoints: endpoints{
@@ -521,6 +534,8 @@ var awsPartition = partition{
Endpoints: endpoints{ Endpoints: endpoints{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-1": endpoint{}, "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"ca-central-1": endpoint{}, "ca-central-1": endpoint{},
@@ -537,10 +552,15 @@ var awsPartition = partition{
"codestar": service{ "codestar": service{
Endpoints: endpoints{ Endpoints: endpoints{
"eu-west-1": endpoint{}, "ap-southeast-1": endpoint{},
"us-east-1": endpoint{}, "ap-southeast-2": endpoint{},
"us-east-2": endpoint{}, "eu-central-1": endpoint{},
"us-west-2": endpoint{}, "eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{},
}, },
}, },
"cognito-identity": service{ "cognito-identity": service{
@@ -549,6 +569,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{}, "ap-northeast-2": endpoint{},
"ap-south-1": endpoint{}, "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
@@ -564,6 +585,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{}, "ap-northeast-2": endpoint{},
"ap-south-1": endpoint{}, "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
@@ -579,6 +601,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{}, "ap-northeast-2": endpoint{},
"ap-south-1": endpoint{}, "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
@@ -684,6 +707,7 @@ var awsPartition = partition{
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"eu-west-2": endpoint{}, "eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-east-2": endpoint{}, "us-east-2": endpoint{},
"us-west-2": endpoint{}, "us-west-2": endpoint{},
@@ -823,6 +847,7 @@ var awsPartition = partition{
Endpoints: endpoints{ Endpoints: endpoints{
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-east-2": endpoint{}, "us-east-2": endpoint{},
@@ -831,7 +856,7 @@ var awsPartition = partition{
}, },
"elasticloadbalancing": service{ "elasticloadbalancing": service{
Defaults: endpoint{ Defaults: endpoint{
Protocols: []string{"http", "https"}, Protocols: []string{"https"},
}, },
Endpoints: endpoints{ Endpoints: endpoints{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
@@ -960,10 +985,15 @@ var awsPartition = partition{
"ap-northeast-2": endpoint{}, "ap-northeast-2": endpoint{},
"ap-south-1": endpoint{}, "ap-south-1": endpoint{},
"ap-southeast-1": endpoint{}, "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"sa-east-1": endpoint{}, "sa-east-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
@@ -986,6 +1016,12 @@ var awsPartition = partition{
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
"glue": service{
Endpoints: endpoints{
"us-east-1": endpoint{},
},
},
"greengrass": service{ "greengrass": service{
IsRegionalized: boxedTrue, IsRegionalized: boxedTrue,
Defaults: endpoint{ Defaults: endpoint{
@@ -1199,6 +1235,12 @@ var awsPartition = partition{
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
"mgh": service{
Endpoints: endpoints{
"us-west-2": endpoint{},
},
},
"mobileanalytics": service{ "mobileanalytics": service{
Endpoints: endpoints{ Endpoints: endpoints{
@@ -1461,6 +1503,7 @@ var awsPartition = partition{
Endpoints: endpoints{ Endpoints: endpoints{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
"ap-northeast-2": endpoint{},
"ap-southeast-1": endpoint{}, "ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"ca-central-1": endpoint{}, "ca-central-1": endpoint{},
@@ -1489,7 +1532,6 @@ var awsPartition = partition{
"ap-northeast-2": endpoint{}, "ap-northeast-2": endpoint{},
"ap-south-1": endpoint{}, "ap-south-1": endpoint{},
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
@@ -1505,6 +1547,7 @@ var awsPartition = partition{
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"eu-west-2": endpoint{}, "eu-west-2": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-east-2": endpoint{}, "us-east-2": endpoint{},
"us-west-1": endpoint{}, "us-west-1": endpoint{},
@@ -1582,6 +1625,7 @@ var awsPartition = partition{
"ap-southeast-2": endpoint{}, "ap-southeast-2": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-east-2": endpoint{}, "us-east-2": endpoint{},
"us-west-2": endpoint{}, "us-west-2": endpoint{},
@@ -1755,6 +1799,7 @@ var awsPartition = partition{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
@@ -1829,6 +1874,18 @@ var awscnPartition = partition{
}, },
}, },
Services: services{ Services: services{
"application-autoscaling": service{
Defaults: endpoint{
Hostname: "autoscaling.{region}.amazonaws.com",
Protocols: []string{"http", "https"},
CredentialScope: credentialScope{
Service: "application-autoscaling",
},
},
Endpoints: endpoints{
"cn-north-1": endpoint{},
},
},
"autoscaling": service{ "autoscaling": service{
Defaults: endpoint{ Defaults: endpoint{
Protocols: []string{"http", "https"}, Protocols: []string{"http", "https"},
@@ -1920,7 +1977,7 @@ var awscnPartition = partition{
}, },
"elasticloadbalancing": service{ "elasticloadbalancing": service{
Defaults: endpoint{ Defaults: endpoint{
Protocols: []string{"http", "https"}, Protocols: []string{"https"},
}, },
Endpoints: endpoints{ Endpoints: endpoints{
"cn-north-1": endpoint{}, "cn-north-1": endpoint{},
@@ -1961,6 +2018,16 @@ var awscnPartition = partition{
}, },
}, },
}, },
"iot": service{
Defaults: endpoint{
CredentialScope: credentialScope{
Service: "execute-api",
},
},
Endpoints: endpoints{
"cn-north-1": endpoint{},
},
},
"kinesis": service{ "kinesis": service{
Endpoints: endpoints{ Endpoints: endpoints{
@@ -2089,6 +2156,18 @@ var awsusgovPartition = partition{
}, },
}, },
Services: services{ Services: services{
"acm": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
},
},
"apigateway": service{
Endpoints: endpoints{
"us-gov-west-1": endpoint{},
},
},
"autoscaling": service{ "autoscaling": service{
Endpoints: endpoints{ Endpoints: endpoints{

View File

@@ -21,12 +21,12 @@
// partitions := resolver.(endpoints.EnumPartitions).Partitions() // partitions := resolver.(endpoints.EnumPartitions).Partitions()
// //
// for _, p := range partitions { // for _, p := range partitions {
// fmt.Println("Regions for", p.Name) // fmt.Println("Regions for", p.ID())
// for id, _ := range p.Regions() { // for id, _ := range p.Regions() {
// fmt.Println("*", id) // fmt.Println("*", id)
// } // }
// //
// fmt.Println("Services for", p.Name) // fmt.Println("Services for", p.ID())
// for id, _ := range p.Services() { // for id, _ := range p.Services() {
// fmt.Println("*", id) // fmt.Println("*", id)
// } // }

View File

@@ -24,7 +24,7 @@ const (
// ErrCodeRead is an error that is returned during HTTP reads. // ErrCodeRead is an error that is returned during HTTP reads.
ErrCodeRead = "ReadError" ErrCodeRead = "ReadError"
// ErrCodeResponseTimeout is the connection timeout error that is recieved // ErrCodeResponseTimeout is the connection timeout error that is received
// during body reads. // during body reads.
ErrCodeResponseTimeout = "ResponseTimeout" ErrCodeResponseTimeout = "ResponseTimeout"
@@ -269,11 +269,17 @@ func (r *Request) Presign(expireTime time.Duration) (string, error) {
return r.HTTPRequest.URL.String(), nil return r.HTTPRequest.URL.String(), nil
} }
// PresignRequest behaves just like presign, but hoists all headers and signs them. // PresignRequest behaves just like presign, with the addition of returning a
// Also returns the signed hash back to the user // set of headers that were signed.
//
// Returns the URL string for the API operation with signature in the query string,
// and the HTTP headers that were included in the signature. These headers must
// be included in any HTTP request made with the presigned URL.
//
// To prevent hoisting any headers to the query string set NotHoist to true on
// this Request value prior to calling PresignRequest.
func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) {
r.ExpireTime = expireTime r.ExpireTime = expireTime
r.NotHoist = true
r.Sign() r.Sign()
if r.Error != nil { if r.Error != nil {
return "", nil, r.Error return "", nil, r.Error

View File

@@ -70,8 +70,8 @@ func isCodeExpiredCreds(code string) bool {
} }
var validParentCodes = map[string]struct{}{ var validParentCodes = map[string]struct{}{
ErrCodeSerialization: struct{}{}, ErrCodeSerialization: {},
ErrCodeRead: struct{}{}, ErrCodeRead: {},
} }
type temporaryError interface { type temporaryError interface {

View File

@@ -79,8 +79,9 @@ type Waiter struct {
MaxAttempts int MaxAttempts int
Delay WaiterDelay Delay WaiterDelay
RequestOptions []Option RequestOptions []Option
NewRequest func([]Option) (*Request, error) NewRequest func([]Option) (*Request, error)
SleepWithContext func(aws.Context, time.Duration) error
} }
// ApplyOptions updates the waiter with the list of waiter options provided. // ApplyOptions updates the waiter with the list of waiter options provided.
@@ -195,8 +196,15 @@ func (w Waiter) WaitWithContext(ctx aws.Context) error {
if sleepFn := req.Config.SleepDelay; sleepFn != nil { if sleepFn := req.Config.SleepDelay; sleepFn != nil {
// Support SleepDelay for backwards compatibility and testing // Support SleepDelay for backwards compatibility and testing
sleepFn(delay) sleepFn(delay)
} else if err := aws.SleepWithContext(ctx, delay); err != nil { } else {
return awserr.New(CanceledErrorCode, "waiter context canceled", err) sleepCtxFn := w.SleepWithContext
if sleepCtxFn == nil {
sleepCtxFn = aws.SleepWithContext
}
if err := sleepCtxFn(ctx, delay); err != nil {
return awserr.New(CanceledErrorCode, "waiter context canceled", err)
}
} }
} }

View File

@@ -76,7 +76,7 @@ type envConfig struct {
SharedConfigFile string SharedConfigFile string
// Sets the path to a custom Credentials Authroity (CA) Bundle PEM file // Sets the path to a custom Credentials Authroity (CA) Bundle PEM file
// that the SDK will use instead of the the system's root CA bundle. // that the SDK will use instead of the system's root CA bundle.
// Only use this if you want to configure the SDK to use a custom set // Only use this if you want to configure the SDK to use a custom set
// of CAs. // of CAs.
// //

View File

@@ -55,7 +55,6 @@
package v4 package v4
import ( import (
"bytes"
"crypto/hmac" "crypto/hmac"
"crypto/sha256" "crypto/sha256"
"encoding/hex" "encoding/hex"
@@ -503,6 +502,8 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
ctx.buildTime() // no depends ctx.buildTime() // no depends
ctx.buildCredentialString() // no depends ctx.buildCredentialString() // no depends
ctx.buildBodyDigest()
unsignedHeaders := ctx.Request.Header unsignedHeaders := ctx.Request.Header
if ctx.isPresign { if ctx.isPresign {
if !disableHeaderHoisting { if !disableHeaderHoisting {
@@ -514,7 +515,6 @@ func (ctx *signingCtx) build(disableHeaderHoisting bool) {
} }
} }
ctx.buildBodyDigest()
ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) ctx.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders)
ctx.buildCanonicalString() // depends on canon headers / signed headers ctx.buildCanonicalString() // depends on canon headers / signed headers
ctx.buildStringToSign() // depends on canon string ctx.buildStringToSign() // depends on canon string
@@ -614,8 +614,8 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
strings.Join(ctx.SignedHeaderVals[k], ",") strings.Join(ctx.SignedHeaderVals[k], ",")
} }
} }
stripExcessSpaces(headerValues)
ctx.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") ctx.canonicalHeaders = strings.Join(headerValues, "\n")
} }
func (ctx *signingCtx) buildCanonicalString() { func (ctx *signingCtx) buildCanonicalString() {
@@ -717,45 +717,46 @@ func makeSha256Reader(reader io.ReadSeeker) []byte {
return hash.Sum(nil) return hash.Sum(nil)
} }
const doubleSpaces = " " const doubleSpace = " "
var doubleSpaceBytes = []byte(doubleSpaces) // stripExcessSpaces will rewrite the passed in slice's string values to not
// contain muliple side-by-side spaces.
func stripExcessSpaces(vals []string) {
var j, k, l, m, spaces int
for i, str := range vals {
// Trim trailing spaces
for j = len(str) - 1; j >= 0 && str[j] == ' '; j-- {
}
func stripExcessSpaces(headerVals []string) []string { // Trim leading spaces
vals := make([]string, len(headerVals)) for k = 0; k < j && str[k] == ' '; k++ {
for i, str := range headerVals { }
// Trim leading and trailing spaces str = str[k : j+1]
trimmed := strings.TrimSpace(str)
idx := strings.Index(trimmed, doubleSpaces) // Strip multiple spaces.
if idx < 0 { j = strings.Index(str, doubleSpace)
vals[i] = trimmed if j < 0 {
vals[i] = str
continue continue
} }
buf := []byte(trimmed) buf := []byte(str)
for idx > -1 { for k, m, l = j, j, len(buf); k < l; k++ {
stripToIdx := -1 if buf[k] == ' ' {
for j := idx + 1; j < len(buf); j++ { if spaces == 0 {
if buf[j] != ' ' { // First space.
buf = append(buf[:idx+1], buf[j:]...) buf[m] = buf[k]
stripToIdx = j - idx - 1 m++
break
}
}
if stripToIdx >= 0 {
// Find next double space
idx = bytes.Index(buf[stripToIdx:], doubleSpaceBytes)
if idx >= 0 {
idx += stripToIdx
} }
spaces++
} else { } else {
idx = -1 // End of multiple spaces.
spaces = 0
buf[m] = buf[k]
m++
} }
} }
vals[i] = string(buf) vals[i] = string(buf[:m])
} }
return vals
} }

View File

@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go" const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK // SDKVersion is the version of this SDK
const SDKVersion = "1.10.12" const SDKVersion = "1.10.38"

View File

@@ -17,19 +17,18 @@ const opBatchGetItem = "BatchGetItem"
// BatchGetItemRequest generates a "aws/request.Request" representing the // BatchGetItemRequest generates a "aws/request.Request" representing the
// client's request for the BatchGetItem operation. The "output" return // client's request for the BatchGetItem operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See BatchGetItem for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See BatchGetItem for more information on using the BatchGetItem
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the BatchGetItem method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the BatchGetItemRequest method. // // Example sending a request using the BatchGetItemRequest method.
// req, resp := client.BatchGetItemRequest(params) // req, resp := client.BatchGetItemRequest(params)
@@ -213,19 +212,18 @@ const opBatchWriteItem = "BatchWriteItem"
// BatchWriteItemRequest generates a "aws/request.Request" representing the // BatchWriteItemRequest generates a "aws/request.Request" representing the
// client's request for the BatchWriteItem operation. The "output" return // client's request for the BatchWriteItem operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See BatchWriteItem for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See BatchWriteItem for more information on using the BatchWriteItem
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the BatchWriteItem method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the BatchWriteItemRequest method. // // Example sending a request using the BatchWriteItemRequest method.
// req, resp := client.BatchWriteItemRequest(params) // req, resp := client.BatchWriteItemRequest(params)
@@ -377,19 +375,18 @@ const opCreateTable = "CreateTable"
// CreateTableRequest generates a "aws/request.Request" representing the // CreateTableRequest generates a "aws/request.Request" representing the
// client's request for the CreateTable operation. The "output" return // client's request for the CreateTable operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See CreateTable for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See CreateTable for more information on using the CreateTable
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the CreateTable method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the CreateTableRequest method. // // Example sending a request using the CreateTableRequest method.
// req, resp := client.CreateTableRequest(params) // req, resp := client.CreateTableRequest(params)
@@ -486,19 +483,18 @@ const opDeleteItem = "DeleteItem"
// DeleteItemRequest generates a "aws/request.Request" representing the // DeleteItemRequest generates a "aws/request.Request" representing the
// client's request for the DeleteItem operation. The "output" return // client's request for the DeleteItem operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See DeleteItem for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See DeleteItem for more information on using the DeleteItem
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the DeleteItem method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the DeleteItemRequest method. // // Example sending a request using the DeleteItemRequest method.
// req, resp := client.DeleteItemRequest(params) // req, resp := client.DeleteItemRequest(params)
@@ -598,19 +594,18 @@ const opDeleteTable = "DeleteTable"
// DeleteTableRequest generates a "aws/request.Request" representing the // DeleteTableRequest generates a "aws/request.Request" representing the
// client's request for the DeleteTable operation. The "output" return // client's request for the DeleteTable operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See DeleteTable for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See DeleteTable for more information on using the DeleteTable
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the DeleteTable method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the DeleteTableRequest method. // // Example sending a request using the DeleteTableRequest method.
// req, resp := client.DeleteTableRequest(params) // req, resp := client.DeleteTableRequest(params)
@@ -714,19 +709,18 @@ const opDescribeLimits = "DescribeLimits"
// DescribeLimitsRequest generates a "aws/request.Request" representing the // DescribeLimitsRequest generates a "aws/request.Request" representing the
// client's request for the DescribeLimits operation. The "output" return // client's request for the DescribeLimits operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See DescribeLimits for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See DescribeLimits for more information on using the DescribeLimits
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the DescribeLimits method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the DescribeLimitsRequest method. // // Example sending a request using the DescribeLimitsRequest method.
// req, resp := client.DescribeLimitsRequest(params) // req, resp := client.DescribeLimitsRequest(params)
@@ -850,19 +844,18 @@ const opDescribeTable = "DescribeTable"
// DescribeTableRequest generates a "aws/request.Request" representing the // DescribeTableRequest generates a "aws/request.Request" representing the
// client's request for the DescribeTable operation. The "output" return // client's request for the DescribeTable operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See DescribeTable for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See DescribeTable for more information on using the DescribeTable
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the DescribeTable method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the DescribeTableRequest method. // // Example sending a request using the DescribeTableRequest method.
// req, resp := client.DescribeTableRequest(params) // req, resp := client.DescribeTableRequest(params)
@@ -942,19 +935,18 @@ const opDescribeTimeToLive = "DescribeTimeToLive"
// DescribeTimeToLiveRequest generates a "aws/request.Request" representing the // DescribeTimeToLiveRequest generates a "aws/request.Request" representing the
// client's request for the DescribeTimeToLive operation. The "output" return // client's request for the DescribeTimeToLive operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See DescribeTimeToLive for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See DescribeTimeToLive for more information on using the DescribeTimeToLive
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the DescribeTimeToLive method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the DescribeTimeToLiveRequest method. // // Example sending a request using the DescribeTimeToLiveRequest method.
// req, resp := client.DescribeTimeToLiveRequest(params) // req, resp := client.DescribeTimeToLiveRequest(params)
@@ -1026,19 +1018,18 @@ const opGetItem = "GetItem"
// GetItemRequest generates a "aws/request.Request" representing the // GetItemRequest generates a "aws/request.Request" representing the
// client's request for the GetItem operation. The "output" return // client's request for the GetItem operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See GetItem for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See GetItem for more information on using the GetItem
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the GetItem method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the GetItemRequest method. // // Example sending a request using the GetItemRequest method.
// req, resp := client.GetItemRequest(params) // req, resp := client.GetItemRequest(params)
@@ -1125,19 +1116,18 @@ const opListTables = "ListTables"
// ListTablesRequest generates a "aws/request.Request" representing the // ListTablesRequest generates a "aws/request.Request" representing the
// client's request for the ListTables operation. The "output" return // client's request for the ListTables operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See ListTables for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See ListTables for more information on using the ListTables
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the ListTables method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the ListTablesRequest method. // // Example sending a request using the ListTablesRequest method.
// req, resp := client.ListTablesRequest(params) // req, resp := client.ListTablesRequest(params)
@@ -1263,19 +1253,18 @@ const opListTagsOfResource = "ListTagsOfResource"
// ListTagsOfResourceRequest generates a "aws/request.Request" representing the // ListTagsOfResourceRequest generates a "aws/request.Request" representing the
// client's request for the ListTagsOfResource operation. The "output" return // client's request for the ListTagsOfResource operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See ListTagsOfResource for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See ListTagsOfResource for more information on using the ListTagsOfResource
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the ListTagsOfResource method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the ListTagsOfResourceRequest method. // // Example sending a request using the ListTagsOfResourceRequest method.
// req, resp := client.ListTagsOfResourceRequest(params) // req, resp := client.ListTagsOfResourceRequest(params)
@@ -1351,19 +1340,18 @@ const opPutItem = "PutItem"
// PutItemRequest generates a "aws/request.Request" representing the // PutItemRequest generates a "aws/request.Request" representing the
// client's request for the PutItem operation. The "output" return // client's request for the PutItem operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See PutItem for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See PutItem for more information on using the PutItem
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the PutItem method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the PutItemRequest method. // // Example sending a request using the PutItemRequest method.
// req, resp := client.PutItemRequest(params) // req, resp := client.PutItemRequest(params)
@@ -1397,10 +1385,31 @@ func (c *DynamoDB) PutItemRequest(input *PutItemInput) (req *request.Request, ou
// table, the new item completely replaces the existing item. You can perform // table, the new item completely replaces the existing item. You can perform
// a conditional put operation (add a new item if one with the specified primary // a conditional put operation (add a new item if one with the specified primary
// key doesn't exist), or replace an existing item if it has certain attribute // key doesn't exist), or replace an existing item if it has certain attribute
// values. // values. You can return the item's attribute values in the same operation,
// using the ReturnValues parameter.
// //
// In addition to putting an item, you can also return the item's attribute // This topic provides general information about the PutItem API.
// values in the same operation, using the ReturnValues parameter. //
// For information on how to call the PutItem API using the AWS SDK in specific
// languages, see the following:
//
// PutItem in the AWS Command Line Interface (http://docs.aws.amazon.com/goto/aws-cli/dynamodb-2012-08-10/PutItem)
//
// PutItem in the AWS SDK for .NET (http://docs.aws.amazon.com/goto/DotNetSDKV3/dynamodb-2012-08-10/PutItem)
//
// PutItem in the AWS SDK for C++ (http://docs.aws.amazon.com/goto/SdkForCpp/dynamodb-2012-08-10/PutItem)
//
// PutItem in the AWS SDK for Go (http://docs.aws.amazon.com/goto/SdkForGoV1/dynamodb-2012-08-10/PutItem)
//
// PutItem in the AWS SDK for Java (http://docs.aws.amazon.com/goto/SdkForJava/dynamodb-2012-08-10/PutItem)
//
// PutItem in the AWS SDK for JavaScript (http://docs.aws.amazon.com/goto/AWSJavaScriptSDK/dynamodb-2012-08-10/PutItem)
//
// PutItem in the AWS SDK for PHP V3 (http://docs.aws.amazon.com/goto/SdkForPHPV3/dynamodb-2012-08-10/PutItem)
//
// PutItem in the AWS SDK for Python (http://docs.aws.amazon.com/goto/boto3/dynamodb-2012-08-10/PutItem)
//
// PutItem in the AWS SDK for Ruby V2 (http://docs.aws.amazon.com/goto/SdkForRubyV2/dynamodb-2012-08-10/PutItem)
// //
// When you add an item, the primary key attribute(s) are the only required // When you add an item, the primary key attribute(s) are the only required
// attributes. Attribute values cannot be null. String and Binary type attributes // attributes. Attribute values cannot be null. String and Binary type attributes
@@ -1472,19 +1481,18 @@ const opQuery = "Query"
// QueryRequest generates a "aws/request.Request" representing the // QueryRequest generates a "aws/request.Request" representing the
// client's request for the Query operation. The "output" return // client's request for the Query operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See Query for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See Query for more information on using the Query
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the Query method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the QueryRequest method. // // Example sending a request using the QueryRequest method.
// req, resp := client.QueryRequest(params) // req, resp := client.QueryRequest(params)
@@ -1519,26 +1527,48 @@ func (c *DynamoDB) QueryRequest(input *QueryInput) (req *request.Request, output
// Query API operation for Amazon DynamoDB. // Query API operation for Amazon DynamoDB.
// //
// A Query operation uses the primary key of a table or a secondary index to // The Query operation finds items based on primary key values. You can query
// directly access items from that table or index. // any table or secondary index that has a composite primary key (a partition
// key and a sort key).
// //
// Use the KeyConditionExpression parameter to provide a specific value for // Use the KeyConditionExpression parameter to provide a specific value for
// the partition key. The Query operation will return all of the items from // the partition key. The Query operation will return all of the items from
// the table or index with that partition key value. You can optionally narrow // the table or index with that partition key value. You can optionally narrow
// the scope of the Query operation by specifying a sort key value and a comparison // the scope of the Query operation by specifying a sort key value and a comparison
// operator in KeyConditionExpression. You can use the ScanIndexForward parameter // operator in KeyConditionExpression. To further refine the Query results,
// to get results in forward or reverse order, by sort key. // you can optionally provide a FilterExpression. A FilterExpression determines
// which items within the results should be returned to you. All of the other
// results are discarded.
// //
// Queries that do not return results consume the minimum number of read capacity // A Query operation always returns a result set. If no matching items are found,
// units for that type of read operation. // the result set will be empty. Queries that do not return results consume
// the minimum number of read capacity units for that type of read operation.
// //
// If the total number of items meeting the query criteria exceeds the result // DynamoDB calculates the number of read capacity units consumed based on item
// set size limit of 1 MB, the query stops and results are returned to the user // size, not on the amount of data that is returned to an application. The number
// with the LastEvaluatedKey element to continue the query in a subsequent operation. // of capacity units consumed will be the same whether you request all of the
// Unlike a Scan operation, a Query operation never returns both an empty result // attributes (the default behavior) or just some of them (using a projection
// set and a LastEvaluatedKey value. LastEvaluatedKey is only provided if you // expression). The number will also be the same whether or not you use a FilterExpression.
// have used the Limit parameter, or if the result set exceeds 1 MB (prior to //
// applying a filter). // Query results are always sorted by the sort key value. If the data type of
// the sort key is Number, the results are returned in numeric order; otherwise,
// the results are returned in order of UTF-8 bytes. By default, the sort order
// is ascending. To reverse the order, set the ScanIndexForward parameter to
// false.
//
// A single Query operation will read up to the maximum number of items set
// (if using the Limit parameter) or a maximum of 1 MB of data and then apply
// any filtering to the results using FilterExpression. If LastEvaluatedKey
// is present in the response, you will need to paginate the result set. For
// more information, see Paginating the Results (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Query.html#Query.Pagination)
// in the Amazon DynamoDB Developer Guide.
//
// FilterExpression is applied after a Query finishes, but before the results
// are returned. A FilterExpression cannot contain partition key or sort key
// attributes. You need to specify those attributes in the KeyConditionExpression.
//
// A Query operation can return an empty result set and a LastEvaluatedKey if
// all the items read for the page of results are filtered out.
// //
// You can query a table, a local secondary index, or a global secondary index. // You can query a table, a local secondary index, or a global secondary index.
// For a query on a table or on a local secondary index, you can set the ConsistentRead // For a query on a table or on a local secondary index, you can set the ConsistentRead
@@ -1645,19 +1675,18 @@ const opScan = "Scan"
// ScanRequest generates a "aws/request.Request" representing the // ScanRequest generates a "aws/request.Request" representing the
// client's request for the Scan operation. The "output" return // client's request for the Scan operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See Scan for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See Scan for more information on using the Scan
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the Scan method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the ScanRequest method. // // Example sending a request using the ScanRequest method.
// req, resp := client.ScanRequest(params) // req, resp := client.ScanRequest(params)
@@ -1702,16 +1731,23 @@ func (c *DynamoDB) ScanRequest(input *ScanInput) (req *request.Request, output *
// the number of items exceeding the limit. A scan can result in no table data // the number of items exceeding the limit. A scan can result in no table data
// meeting the filter criteria. // meeting the filter criteria.
// //
// By default, Scan operations proceed sequentially; however, for faster performance // A single Scan operation will read up to the maximum number of items set (if
// on a large table or secondary index, applications can request a parallel // using the Limit parameter) or a maximum of 1 MB of data and then apply any
// Scan operation by providing the Segment and TotalSegments parameters. For // filtering to the results using FilterExpression. If LastEvaluatedKey is present
// more information, see Parallel Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html#QueryAndScanParallelScan) // in the response, you will need to paginate the result set. For more information,
// see Paginating the Results (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.Pagination)
// in the Amazon DynamoDB Developer Guide. // in the Amazon DynamoDB Developer Guide.
// //
// By default, Scan uses eventually consistent reads when accessing the data // Scan operations proceed sequentially; however, for faster performance on
// in a table; therefore, the result set might not include the changes to data // a large table or secondary index, applications can request a parallel Scan
// in the table immediately before the operation began. If you need a consistent // operation by providing the Segment and TotalSegments parameters. For more
// copy of the data, as of the time that the Scan begins, you can set the ConsistentRead // information, see Parallel Scan (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Scan.html#Scan.ParallelScan)
// in the Amazon DynamoDB Developer Guide.
//
// Scan uses eventually consistent reads when accessing the data in a table;
// therefore, the result set might not include the changes to data in the table
// immediately before the operation began. If you need a consistent copy of
// the data, as of the time that the Scan begins, you can set the ConsistentRead
// parameter to true. // parameter to true.
// //
// Returns awserr.Error for service API and SDK errors. Use runtime type assertions // Returns awserr.Error for service API and SDK errors. Use runtime type assertions
@@ -1813,19 +1849,18 @@ const opTagResource = "TagResource"
// TagResourceRequest generates a "aws/request.Request" representing the // TagResourceRequest generates a "aws/request.Request" representing the
// client's request for the TagResource operation. The "output" return // client's request for the TagResource operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See TagResource for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See TagResource for more information on using the TagResource
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the TagResource method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the TagResourceRequest method. // // Example sending a request using the TagResourceRequest method.
// req, resp := client.TagResourceRequest(params) // req, resp := client.TagResourceRequest(params)
@@ -1920,19 +1955,18 @@ const opUntagResource = "UntagResource"
// UntagResourceRequest generates a "aws/request.Request" representing the // UntagResourceRequest generates a "aws/request.Request" representing the
// client's request for the UntagResource operation. The "output" return // client's request for the UntagResource operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See UntagResource for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See UntagResource for more information on using the UntagResource
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the UntagResource method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the UntagResourceRequest method. // // Example sending a request using the UntagResourceRequest method.
// req, resp := client.UntagResourceRequest(params) // req, resp := client.UntagResourceRequest(params)
@@ -2025,19 +2059,18 @@ const opUpdateItem = "UpdateItem"
// UpdateItemRequest generates a "aws/request.Request" representing the // UpdateItemRequest generates a "aws/request.Request" representing the
// client's request for the UpdateItem operation. The "output" return // client's request for the UpdateItem operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See UpdateItem for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See UpdateItem for more information on using the UpdateItem
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the UpdateItem method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the UpdateItemRequest method. // // Example sending a request using the UpdateItemRequest method.
// req, resp := client.UpdateItemRequest(params) // req, resp := client.UpdateItemRequest(params)
@@ -2131,19 +2164,18 @@ const opUpdateTable = "UpdateTable"
// UpdateTableRequest generates a "aws/request.Request" representing the // UpdateTableRequest generates a "aws/request.Request" representing the
// client's request for the UpdateTable operation. The "output" return // client's request for the UpdateTable operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See UpdateTable for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See UpdateTable for more information on using the UpdateTable
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the UpdateTable method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the UpdateTableRequest method. // // Example sending a request using the UpdateTableRequest method.
// req, resp := client.UpdateTableRequest(params) // req, resp := client.UpdateTableRequest(params)
@@ -2247,19 +2279,18 @@ const opUpdateTimeToLive = "UpdateTimeToLive"
// UpdateTimeToLiveRequest generates a "aws/request.Request" representing the // UpdateTimeToLiveRequest generates a "aws/request.Request" representing the
// client's request for the UpdateTimeToLive operation. The "output" return // client's request for the UpdateTimeToLive operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See UpdateTimeToLive for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See UpdateTimeToLive for more information on using the UpdateTimeToLive
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the UpdateTimeToLive method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the UpdateTimeToLiveRequest method. // // Example sending a request using the UpdateTimeToLiveRequest method.
// req, resp := client.UpdateTimeToLiveRequest(params) // req, resp := client.UpdateTimeToLiveRequest(params)
@@ -2288,11 +2319,11 @@ func (c *DynamoDB) UpdateTimeToLiveRequest(input *UpdateTimeToLiveInput) (req *r
// UpdateTimeToLive API operation for Amazon DynamoDB. // UpdateTimeToLive API operation for Amazon DynamoDB.
// //
// Specify the lifetime of individual table items. The database automatically // The UpdateTimeToLive method will enable or disable TTL for the specified
// removes the item at the expiration of the item. The UpdateTimeToLive method // table. A successful UpdateTimeToLive call returns the current TimeToLiveSpecification;
// will enable or disable TTL for the specified table. A successful UpdateTimeToLive // it may take up to one hour for the change to fully process. Any additional
// call returns the current TimeToLiveSpecification; it may take up to one hour // UpdateTimeToLive calls for the same table during this one hour duration result
// for the change to fully process. // in a ValidationException.
// //
// TTL compares the current time in epoch time format to the time stored in // TTL compares the current time in epoch time format to the time stored in
// the TTL attribute of an item. If the epoch time value stored in the attribute // the TTL attribute of an item. If the epoch time value stored in the attribute
@@ -2908,8 +2939,8 @@ type BatchWriteItemInput struct {
// * DeleteRequest - Perform a DeleteItem operation on the specified item. // * DeleteRequest - Perform a DeleteItem operation on the specified item.
// The item to be deleted is identified by a Key subelement: // The item to be deleted is identified by a Key subelement:
// //
// Key - A map of primary key attribute values that uniquely identify the ! // Key - A map of primary key attribute values that uniquely identify the item.
// item. Each entry in this map consists of an attribute name and an attribute // Each entry in this map consists of an attribute name and an attribute
// value. For each primary key, you must provide all of the key attributes. // value. For each primary key, you must provide all of the key attributes.
// For example, with a simple primary key, you only need to provide a value // For example, with a simple primary key, you only need to provide a value
// for the partition key. For a composite primary key, you must provide values // for the partition key. For a composite primary key, you must provide values
@@ -3786,7 +3817,7 @@ type DeleteItemInput struct {
// in the Amazon DynamoDB Developer Guide. // in the Amazon DynamoDB Developer Guide.
ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
// This is a legacy parameter. Use ConditionExpresssion instead. For more information, // This is a legacy parameter. Use ConditionExpression instead. For more information,
// see Expected (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) // see Expected (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
// in the Amazon DynamoDB Developer Guide. // in the Amazon DynamoDB Developer Guide.
Expected map[string]*ExpectedAttributeValue `type:"map"` Expected map[string]*ExpectedAttributeValue `type:"map"`
@@ -5953,7 +5984,7 @@ type PutItemInput struct {
// in the Amazon DynamoDB Developer Guide. // in the Amazon DynamoDB Developer Guide.
ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
// This is a legacy parameter. Use ConditionExpresssion instead. For more information, // This is a legacy parameter. Use ConditionExpression instead. For more information,
// see Expected (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) // see Expected (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
// in the Amazon DynamoDB Developer Guide. // in the Amazon DynamoDB Developer Guide.
Expected map[string]*ExpectedAttributeValue `type:"map"` Expected map[string]*ExpectedAttributeValue `type:"map"`
@@ -8050,7 +8081,7 @@ type UpdateItemInput struct {
// in the Amazon DynamoDB Developer Guide. // in the Amazon DynamoDB Developer Guide.
ConditionalOperator *string `type:"string" enum:"ConditionalOperator"` ConditionalOperator *string `type:"string" enum:"ConditionalOperator"`
// This is a legacy parameter. Use ConditionExpresssion instead. For more information, // This is a legacy parameter. Use ConditionExpression instead. For more information,
// see Expected (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html) // see Expected (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/LegacyConditionalParameters.Expected.html)
// in the Amazon DynamoDB Developer Guide. // in the Amazon DynamoDB Developer Guide.
Expected map[string]*ExpectedAttributeValue `type:"map"` Expected map[string]*ExpectedAttributeValue `type:"map"`
@@ -8148,9 +8179,8 @@ type UpdateItemInput struct {
// (the default), no statistics are returned. // (the default), no statistics are returned.
ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"` ReturnItemCollectionMetrics *string `type:"string" enum:"ReturnItemCollectionMetrics"`
// Use ReturnValues if you want to get the item attributes as they appeared // Use ReturnValues if you want to get the item attributes as they appear before
// either before or after they were updated. For UpdateItem, the valid values // or after they are updated. For UpdateItem, the valid values are:
// are:
// //
// * NONE - If ReturnValues is not specified, or if its value is NONE, then // * NONE - If ReturnValues is not specified, or if its value is NONE, then
// nothing is returned. (This setting is the default for ReturnValues.) // nothing is returned. (This setting is the default for ReturnValues.)
@@ -8169,9 +8199,9 @@ type UpdateItemInput struct {
// //
// There is no additional cost associated with requesting a return value aside // There is no additional cost associated with requesting a return value aside
// from the small network and processing overhead of receiving a larger response. // from the small network and processing overhead of receiving a larger response.
// No Read Capacity Units are consumed. // No read capacity units are consumed.
// //
// Values returned are strongly consistent // The values returned are strongly consistent.
ReturnValues *string `type:"string" enum:"ReturnValue"` ReturnValues *string `type:"string" enum:"ReturnValue"`
// The name of the table containing the item to update. // The name of the table containing the item to update.
@@ -8361,9 +8391,11 @@ func (s *UpdateItemInput) SetUpdateExpression(v string) *UpdateItemInput {
type UpdateItemOutput struct { type UpdateItemOutput struct {
_ struct{} `type:"structure"` _ struct{} `type:"structure"`
// A map of attribute values as they appeared before the UpdateItem operation. // A map of attribute values as they appear before or after the UpdateItem operation,
// This map only appears if ReturnValues was specified as something other than // as determined by the ReturnValues parameter.
// NONE in the request. Each element represents one attribute. //
// The Attributes map is only present if ReturnValues was specified as something
// other than NONE in the request. Each element represents one attribute.
Attributes map[string]*AttributeValue `type:"map"` Attributes map[string]*AttributeValue `type:"map"`
// The capacity units consumed by the UpdateItem operation. The data returned // The capacity units consumed by the UpdateItem operation. The data returned

View File

@@ -29,69 +29,17 @@
// //
// Using the Client // Using the Client
// //
// To use the client for Amazon DynamoDB you will first need // To Amazon DynamoDB with the SDK use the New function to create
// to create a new instance of it. // a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
// //
// When creating a client for an AWS service you'll first need to have a Session // See the SDK's documentation for more information on how to use the SDK.
// already created. The Session provides configuration that can be shared
// between multiple service clients. Additional configuration can be applied to
// the Session and service's client when they are constructed. The aws package's
// Config type contains several fields such as Region for the AWS Region the
// client should make API requests too. The optional Config value can be provided
// as the variadic argument for Sessions and client creation.
//
// Once the service's client is created you can use it to make API requests the
// AWS service. These clients are safe to use concurrently.
//
// // Create a session to share configuration, and load external configuration.
// sess := session.Must(session.NewSession())
//
// // Create the service's client with the session.
// svc := dynamodb.New(sess)
//
// See the SDK's documentation for more information on how to use service clients.
// https://docs.aws.amazon.com/sdk-for-go/api/ // https://docs.aws.amazon.com/sdk-for-go/api/
// //
// See aws package's Config type for more information on configuration options. // See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
// //
// See the Amazon DynamoDB client DynamoDB for more // See the Amazon DynamoDB client DynamoDB for more
// information on creating the service's client. // information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New // https://docs.aws.amazon.com/sdk-for-go/api/service/dynamodb/#New
//
// Once the client is created you can make an API request to the service.
// Each API method takes a input parameter, and returns the service response
// and an error.
//
// The API method will document which error codes the service can be returned
// by the operation if the service models the API operation's errors. These
// errors will also be available as const strings prefixed with "ErrCode".
//
// result, err := svc.BatchGetItem(params)
// if err != nil {
// // Cast err to awserr.Error to handle specific error codes.
// aerr, ok := err.(awserr.Error)
// if ok && aerr.Code() == <error code to check for> {
// // Specific error code handling
// }
// return err
// }
//
// fmt.Println("BatchGetItem result:")
// fmt.Println(result)
//
// Using the Client with Context
//
// The service's client also provides methods to make API requests with a Context
// value. This allows you to control the timeout, and cancellation of pending
// requests. These methods also take request Option as variadic parameter to apply
// additional configuration to the API request.
//
// ctx := context.Background()
//
// result, err := svc.BatchGetItemWithContext(ctx, params)
//
// See the request package documentation for more information on using Context pattern
// with the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
package dynamodb package dynamodb

View File

@@ -34,7 +34,7 @@
// panic(fmt.Sprintf("failed to DynamoDB marshal Record, %v", err)) // panic(fmt.Sprintf("failed to DynamoDB marshal Record, %v", err))
// } // }
// //
// _, err := r.svc.PutItem(&dynamodb.PutItemInput{ // _, err = svc.PutItem(&dynamodb.PutItemInput{
// TableName: aws.String(myTableName), // TableName: aws.String(myTableName),
// Item: av, // Item: av,
// }) // })

View File

@@ -11,7 +11,7 @@ import (
// WaitUntilTableExists uses the DynamoDB API operation // WaitUntilTableExists uses the DynamoDB API operation
// DescribeTable to wait for a condition to be met before returning. // DescribeTable to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error { func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error {
return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilTableExistsWithContext(aws.BackgroundContext(), input)
@@ -62,7 +62,7 @@ func (c *DynamoDB) WaitUntilTableExistsWithContext(ctx aws.Context, input *Descr
// WaitUntilTableNotExists uses the DynamoDB API operation // WaitUntilTableNotExists uses the DynamoDB API operation
// DescribeTable to wait for a condition to be met before returning. // DescribeTable to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error { func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error {
return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilTableNotExistsWithContext(aws.BackgroundContext(), input)

File diff suppressed because it is too large Load Diff

View File

@@ -15,69 +15,17 @@
// //
// Using the Client // Using the Client
// //
// To use the client for Amazon Elastic Compute Cloud you will first need // To Amazon Elastic Compute Cloud with the SDK use the New function to create
// to create a new instance of it. // a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
// //
// When creating a client for an AWS service you'll first need to have a Session // See the SDK's documentation for more information on how to use the SDK.
// already created. The Session provides configuration that can be shared
// between multiple service clients. Additional configuration can be applied to
// the Session and service's client when they are constructed. The aws package's
// Config type contains several fields such as Region for the AWS Region the
// client should make API requests too. The optional Config value can be provided
// as the variadic argument for Sessions and client creation.
//
// Once the service's client is created you can use it to make API requests the
// AWS service. These clients are safe to use concurrently.
//
// // Create a session to share configuration, and load external configuration.
// sess := session.Must(session.NewSession())
//
// // Create the service's client with the session.
// svc := ec2.New(sess)
//
// See the SDK's documentation for more information on how to use service clients.
// https://docs.aws.amazon.com/sdk-for-go/api/ // https://docs.aws.amazon.com/sdk-for-go/api/
// //
// See aws package's Config type for more information on configuration options. // See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
// //
// See the Amazon Elastic Compute Cloud client EC2 for more // See the Amazon Elastic Compute Cloud client EC2 for more
// information on creating the service's client. // information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#New // https://docs.aws.amazon.com/sdk-for-go/api/service/ec2/#New
//
// Once the client is created you can make an API request to the service.
// Each API method takes a input parameter, and returns the service response
// and an error.
//
// The API method will document which error codes the service can be returned
// by the operation if the service models the API operation's errors. These
// errors will also be available as const strings prefixed with "ErrCode".
//
// result, err := svc.AcceptReservedInstancesExchangeQuote(params)
// if err != nil {
// // Cast err to awserr.Error to handle specific error codes.
// aerr, ok := err.(awserr.Error)
// if ok && aerr.Code() == <error code to check for> {
// // Specific error code handling
// }
// return err
// }
//
// fmt.Println("AcceptReservedInstancesExchangeQuote result:")
// fmt.Println(result)
//
// Using the Client with Context
//
// The service's client also provides methods to make API requests with a Context
// value. This allows you to control the timeout, and cancellation of pending
// requests. These methods also take request Option as variadic parameter to apply
// additional configuration to the API request.
//
// ctx := context.Background()
//
// result, err := svc.AcceptReservedInstancesExchangeQuoteWithContext(ctx, params)
//
// See the request package documentation for more information on using Context pattern
// with the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
package ec2 package ec2

View File

@@ -11,7 +11,7 @@ import (
// WaitUntilBundleTaskComplete uses the Amazon EC2 API operation // WaitUntilBundleTaskComplete uses the Amazon EC2 API operation
// DescribeBundleTasks to wait for a condition to be met before returning. // DescribeBundleTasks to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error { func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error {
return c.WaitUntilBundleTaskCompleteWithContext(aws.BackgroundContext(), input) return c.WaitUntilBundleTaskCompleteWithContext(aws.BackgroundContext(), input)
@@ -62,7 +62,7 @@ func (c *EC2) WaitUntilBundleTaskCompleteWithContext(ctx aws.Context, input *Des
// WaitUntilConversionTaskCancelled uses the Amazon EC2 API operation // WaitUntilConversionTaskCancelled uses the Amazon EC2 API operation
// DescribeConversionTasks to wait for a condition to be met before returning. // DescribeConversionTasks to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error { func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error {
return c.WaitUntilConversionTaskCancelledWithContext(aws.BackgroundContext(), input) return c.WaitUntilConversionTaskCancelledWithContext(aws.BackgroundContext(), input)
@@ -108,7 +108,7 @@ func (c *EC2) WaitUntilConversionTaskCancelledWithContext(ctx aws.Context, input
// WaitUntilConversionTaskCompleted uses the Amazon EC2 API operation // WaitUntilConversionTaskCompleted uses the Amazon EC2 API operation
// DescribeConversionTasks to wait for a condition to be met before returning. // DescribeConversionTasks to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error { func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error {
return c.WaitUntilConversionTaskCompletedWithContext(aws.BackgroundContext(), input) return c.WaitUntilConversionTaskCompletedWithContext(aws.BackgroundContext(), input)
@@ -164,7 +164,7 @@ func (c *EC2) WaitUntilConversionTaskCompletedWithContext(ctx aws.Context, input
// WaitUntilConversionTaskDeleted uses the Amazon EC2 API operation // WaitUntilConversionTaskDeleted uses the Amazon EC2 API operation
// DescribeConversionTasks to wait for a condition to be met before returning. // DescribeConversionTasks to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error { func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error {
return c.WaitUntilConversionTaskDeletedWithContext(aws.BackgroundContext(), input) return c.WaitUntilConversionTaskDeletedWithContext(aws.BackgroundContext(), input)
@@ -210,7 +210,7 @@ func (c *EC2) WaitUntilConversionTaskDeletedWithContext(ctx aws.Context, input *
// WaitUntilCustomerGatewayAvailable uses the Amazon EC2 API operation // WaitUntilCustomerGatewayAvailable uses the Amazon EC2 API operation
// DescribeCustomerGateways to wait for a condition to be met before returning. // DescribeCustomerGateways to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error { func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error {
return c.WaitUntilCustomerGatewayAvailableWithContext(aws.BackgroundContext(), input) return c.WaitUntilCustomerGatewayAvailableWithContext(aws.BackgroundContext(), input)
@@ -266,7 +266,7 @@ func (c *EC2) WaitUntilCustomerGatewayAvailableWithContext(ctx aws.Context, inpu
// WaitUntilExportTaskCancelled uses the Amazon EC2 API operation // WaitUntilExportTaskCancelled uses the Amazon EC2 API operation
// DescribeExportTasks to wait for a condition to be met before returning. // DescribeExportTasks to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error { func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error {
return c.WaitUntilExportTaskCancelledWithContext(aws.BackgroundContext(), input) return c.WaitUntilExportTaskCancelledWithContext(aws.BackgroundContext(), input)
@@ -312,7 +312,7 @@ func (c *EC2) WaitUntilExportTaskCancelledWithContext(ctx aws.Context, input *De
// WaitUntilExportTaskCompleted uses the Amazon EC2 API operation // WaitUntilExportTaskCompleted uses the Amazon EC2 API operation
// DescribeExportTasks to wait for a condition to be met before returning. // DescribeExportTasks to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error { func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error {
return c.WaitUntilExportTaskCompletedWithContext(aws.BackgroundContext(), input) return c.WaitUntilExportTaskCompletedWithContext(aws.BackgroundContext(), input)
@@ -358,7 +358,7 @@ func (c *EC2) WaitUntilExportTaskCompletedWithContext(ctx aws.Context, input *De
// WaitUntilImageAvailable uses the Amazon EC2 API operation // WaitUntilImageAvailable uses the Amazon EC2 API operation
// DescribeImages to wait for a condition to be met before returning. // DescribeImages to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error { func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error {
return c.WaitUntilImageAvailableWithContext(aws.BackgroundContext(), input) return c.WaitUntilImageAvailableWithContext(aws.BackgroundContext(), input)
@@ -409,7 +409,7 @@ func (c *EC2) WaitUntilImageAvailableWithContext(ctx aws.Context, input *Describ
// WaitUntilImageExists uses the Amazon EC2 API operation // WaitUntilImageExists uses the Amazon EC2 API operation
// DescribeImages to wait for a condition to be met before returning. // DescribeImages to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilImageExists(input *DescribeImagesInput) error { func (c *EC2) WaitUntilImageExists(input *DescribeImagesInput) error {
return c.WaitUntilImageExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilImageExistsWithContext(aws.BackgroundContext(), input)
@@ -460,7 +460,7 @@ func (c *EC2) WaitUntilImageExistsWithContext(ctx aws.Context, input *DescribeIm
// WaitUntilInstanceExists uses the Amazon EC2 API operation // WaitUntilInstanceExists uses the Amazon EC2 API operation
// DescribeInstances to wait for a condition to be met before returning. // DescribeInstances to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error { func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error {
return c.WaitUntilInstanceExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilInstanceExistsWithContext(aws.BackgroundContext(), input)
@@ -511,7 +511,7 @@ func (c *EC2) WaitUntilInstanceExistsWithContext(ctx aws.Context, input *Describ
// WaitUntilInstanceRunning uses the Amazon EC2 API operation // WaitUntilInstanceRunning uses the Amazon EC2 API operation
// DescribeInstances to wait for a condition to be met before returning. // DescribeInstances to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error { func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error {
return c.WaitUntilInstanceRunningWithContext(aws.BackgroundContext(), input) return c.WaitUntilInstanceRunningWithContext(aws.BackgroundContext(), input)
@@ -577,7 +577,7 @@ func (c *EC2) WaitUntilInstanceRunningWithContext(ctx aws.Context, input *Descri
// WaitUntilInstanceStatusOk uses the Amazon EC2 API operation // WaitUntilInstanceStatusOk uses the Amazon EC2 API operation
// DescribeInstanceStatus to wait for a condition to be met before returning. // DescribeInstanceStatus to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error { func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error {
return c.WaitUntilInstanceStatusOkWithContext(aws.BackgroundContext(), input) return c.WaitUntilInstanceStatusOkWithContext(aws.BackgroundContext(), input)
@@ -628,7 +628,7 @@ func (c *EC2) WaitUntilInstanceStatusOkWithContext(ctx aws.Context, input *Descr
// WaitUntilInstanceStopped uses the Amazon EC2 API operation // WaitUntilInstanceStopped uses the Amazon EC2 API operation
// DescribeInstances to wait for a condition to be met before returning. // DescribeInstances to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error { func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error {
return c.WaitUntilInstanceStoppedWithContext(aws.BackgroundContext(), input) return c.WaitUntilInstanceStoppedWithContext(aws.BackgroundContext(), input)
@@ -684,7 +684,7 @@ func (c *EC2) WaitUntilInstanceStoppedWithContext(ctx aws.Context, input *Descri
// WaitUntilInstanceTerminated uses the Amazon EC2 API operation // WaitUntilInstanceTerminated uses the Amazon EC2 API operation
// DescribeInstances to wait for a condition to be met before returning. // DescribeInstances to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error { func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error {
return c.WaitUntilInstanceTerminatedWithContext(aws.BackgroundContext(), input) return c.WaitUntilInstanceTerminatedWithContext(aws.BackgroundContext(), input)
@@ -740,7 +740,7 @@ func (c *EC2) WaitUntilInstanceTerminatedWithContext(ctx aws.Context, input *Des
// WaitUntilKeyPairExists uses the Amazon EC2 API operation // WaitUntilKeyPairExists uses the Amazon EC2 API operation
// DescribeKeyPairs to wait for a condition to be met before returning. // DescribeKeyPairs to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error { func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error {
return c.WaitUntilKeyPairExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilKeyPairExistsWithContext(aws.BackgroundContext(), input)
@@ -791,7 +791,7 @@ func (c *EC2) WaitUntilKeyPairExistsWithContext(ctx aws.Context, input *Describe
// WaitUntilNatGatewayAvailable uses the Amazon EC2 API operation // WaitUntilNatGatewayAvailable uses the Amazon EC2 API operation
// DescribeNatGateways to wait for a condition to be met before returning. // DescribeNatGateways to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilNatGatewayAvailable(input *DescribeNatGatewaysInput) error { func (c *EC2) WaitUntilNatGatewayAvailable(input *DescribeNatGatewaysInput) error {
return c.WaitUntilNatGatewayAvailableWithContext(aws.BackgroundContext(), input) return c.WaitUntilNatGatewayAvailableWithContext(aws.BackgroundContext(), input)
@@ -857,7 +857,7 @@ func (c *EC2) WaitUntilNatGatewayAvailableWithContext(ctx aws.Context, input *De
// WaitUntilNetworkInterfaceAvailable uses the Amazon EC2 API operation // WaitUntilNetworkInterfaceAvailable uses the Amazon EC2 API operation
// DescribeNetworkInterfaces to wait for a condition to be met before returning. // DescribeNetworkInterfaces to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error { func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error {
return c.WaitUntilNetworkInterfaceAvailableWithContext(aws.BackgroundContext(), input) return c.WaitUntilNetworkInterfaceAvailableWithContext(aws.BackgroundContext(), input)
@@ -908,7 +908,7 @@ func (c *EC2) WaitUntilNetworkInterfaceAvailableWithContext(ctx aws.Context, inp
// WaitUntilPasswordDataAvailable uses the Amazon EC2 API operation // WaitUntilPasswordDataAvailable uses the Amazon EC2 API operation
// GetPasswordData to wait for a condition to be met before returning. // GetPasswordData to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error { func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error {
return c.WaitUntilPasswordDataAvailableWithContext(aws.BackgroundContext(), input) return c.WaitUntilPasswordDataAvailableWithContext(aws.BackgroundContext(), input)
@@ -954,7 +954,7 @@ func (c *EC2) WaitUntilPasswordDataAvailableWithContext(ctx aws.Context, input *
// WaitUntilSnapshotCompleted uses the Amazon EC2 API operation // WaitUntilSnapshotCompleted uses the Amazon EC2 API operation
// DescribeSnapshots to wait for a condition to be met before returning. // DescribeSnapshots to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error { func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error {
return c.WaitUntilSnapshotCompletedWithContext(aws.BackgroundContext(), input) return c.WaitUntilSnapshotCompletedWithContext(aws.BackgroundContext(), input)
@@ -1000,7 +1000,7 @@ func (c *EC2) WaitUntilSnapshotCompletedWithContext(ctx aws.Context, input *Desc
// WaitUntilSpotInstanceRequestFulfilled uses the Amazon EC2 API operation // WaitUntilSpotInstanceRequestFulfilled uses the Amazon EC2 API operation
// DescribeSpotInstanceRequests to wait for a condition to be met before returning. // DescribeSpotInstanceRequests to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error { func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error {
return c.WaitUntilSpotInstanceRequestFulfilledWithContext(aws.BackgroundContext(), input) return c.WaitUntilSpotInstanceRequestFulfilledWithContext(aws.BackgroundContext(), input)
@@ -1045,6 +1045,11 @@ func (c *EC2) WaitUntilSpotInstanceRequestFulfilledWithContext(ctx aws.Context,
Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code", Matcher: request.PathAnyWaiterMatch, Argument: "SpotInstanceRequests[].Status.Code",
Expected: "system-error", Expected: "system-error",
}, },
{
State: request.RetryWaiterState,
Matcher: request.ErrorWaiterMatch,
Expected: "InvalidSpotInstanceRequestID.NotFound",
},
}, },
Logger: c.Config.Logger, Logger: c.Config.Logger,
NewRequest: func(opts []request.Option) (*request.Request, error) { NewRequest: func(opts []request.Option) (*request.Request, error) {
@@ -1066,7 +1071,7 @@ func (c *EC2) WaitUntilSpotInstanceRequestFulfilledWithContext(ctx aws.Context,
// WaitUntilSubnetAvailable uses the Amazon EC2 API operation // WaitUntilSubnetAvailable uses the Amazon EC2 API operation
// DescribeSubnets to wait for a condition to be met before returning. // DescribeSubnets to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error { func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error {
return c.WaitUntilSubnetAvailableWithContext(aws.BackgroundContext(), input) return c.WaitUntilSubnetAvailableWithContext(aws.BackgroundContext(), input)
@@ -1112,7 +1117,7 @@ func (c *EC2) WaitUntilSubnetAvailableWithContext(ctx aws.Context, input *Descri
// WaitUntilSystemStatusOk uses the Amazon EC2 API operation // WaitUntilSystemStatusOk uses the Amazon EC2 API operation
// DescribeInstanceStatus to wait for a condition to be met before returning. // DescribeInstanceStatus to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error { func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error {
return c.WaitUntilSystemStatusOkWithContext(aws.BackgroundContext(), input) return c.WaitUntilSystemStatusOkWithContext(aws.BackgroundContext(), input)
@@ -1158,7 +1163,7 @@ func (c *EC2) WaitUntilSystemStatusOkWithContext(ctx aws.Context, input *Describ
// WaitUntilVolumeAvailable uses the Amazon EC2 API operation // WaitUntilVolumeAvailable uses the Amazon EC2 API operation
// DescribeVolumes to wait for a condition to be met before returning. // DescribeVolumes to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error { func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error {
return c.WaitUntilVolumeAvailableWithContext(aws.BackgroundContext(), input) return c.WaitUntilVolumeAvailableWithContext(aws.BackgroundContext(), input)
@@ -1209,7 +1214,7 @@ func (c *EC2) WaitUntilVolumeAvailableWithContext(ctx aws.Context, input *Descri
// WaitUntilVolumeDeleted uses the Amazon EC2 API operation // WaitUntilVolumeDeleted uses the Amazon EC2 API operation
// DescribeVolumes to wait for a condition to be met before returning. // DescribeVolumes to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error { func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error {
return c.WaitUntilVolumeDeletedWithContext(aws.BackgroundContext(), input) return c.WaitUntilVolumeDeletedWithContext(aws.BackgroundContext(), input)
@@ -1260,7 +1265,7 @@ func (c *EC2) WaitUntilVolumeDeletedWithContext(ctx aws.Context, input *Describe
// WaitUntilVolumeInUse uses the Amazon EC2 API operation // WaitUntilVolumeInUse uses the Amazon EC2 API operation
// DescribeVolumes to wait for a condition to be met before returning. // DescribeVolumes to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error { func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error {
return c.WaitUntilVolumeInUseWithContext(aws.BackgroundContext(), input) return c.WaitUntilVolumeInUseWithContext(aws.BackgroundContext(), input)
@@ -1311,7 +1316,7 @@ func (c *EC2) WaitUntilVolumeInUseWithContext(ctx aws.Context, input *DescribeVo
// WaitUntilVpcAvailable uses the Amazon EC2 API operation // WaitUntilVpcAvailable uses the Amazon EC2 API operation
// DescribeVpcs to wait for a condition to be met before returning. // DescribeVpcs to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error { func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error {
return c.WaitUntilVpcAvailableWithContext(aws.BackgroundContext(), input) return c.WaitUntilVpcAvailableWithContext(aws.BackgroundContext(), input)
@@ -1357,7 +1362,7 @@ func (c *EC2) WaitUntilVpcAvailableWithContext(ctx aws.Context, input *DescribeV
// WaitUntilVpcExists uses the Amazon EC2 API operation // WaitUntilVpcExists uses the Amazon EC2 API operation
// DescribeVpcs to wait for a condition to be met before returning. // DescribeVpcs to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilVpcExists(input *DescribeVpcsInput) error { func (c *EC2) WaitUntilVpcExists(input *DescribeVpcsInput) error {
return c.WaitUntilVpcExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilVpcExistsWithContext(aws.BackgroundContext(), input)
@@ -1408,7 +1413,7 @@ func (c *EC2) WaitUntilVpcExistsWithContext(ctx aws.Context, input *DescribeVpcs
// WaitUntilVpcPeeringConnectionDeleted uses the Amazon EC2 API operation // WaitUntilVpcPeeringConnectionDeleted uses the Amazon EC2 API operation
// DescribeVpcPeeringConnections to wait for a condition to be met before returning. // DescribeVpcPeeringConnections to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilVpcPeeringConnectionDeleted(input *DescribeVpcPeeringConnectionsInput) error { func (c *EC2) WaitUntilVpcPeeringConnectionDeleted(input *DescribeVpcPeeringConnectionsInput) error {
return c.WaitUntilVpcPeeringConnectionDeletedWithContext(aws.BackgroundContext(), input) return c.WaitUntilVpcPeeringConnectionDeletedWithContext(aws.BackgroundContext(), input)
@@ -1459,7 +1464,7 @@ func (c *EC2) WaitUntilVpcPeeringConnectionDeletedWithContext(ctx aws.Context, i
// WaitUntilVpcPeeringConnectionExists uses the Amazon EC2 API operation // WaitUntilVpcPeeringConnectionExists uses the Amazon EC2 API operation
// DescribeVpcPeeringConnections to wait for a condition to be met before returning. // DescribeVpcPeeringConnections to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilVpcPeeringConnectionExists(input *DescribeVpcPeeringConnectionsInput) error { func (c *EC2) WaitUntilVpcPeeringConnectionExists(input *DescribeVpcPeeringConnectionsInput) error {
return c.WaitUntilVpcPeeringConnectionExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilVpcPeeringConnectionExistsWithContext(aws.BackgroundContext(), input)
@@ -1510,7 +1515,7 @@ func (c *EC2) WaitUntilVpcPeeringConnectionExistsWithContext(ctx aws.Context, in
// WaitUntilVpnConnectionAvailable uses the Amazon EC2 API operation // WaitUntilVpnConnectionAvailable uses the Amazon EC2 API operation
// DescribeVpnConnections to wait for a condition to be met before returning. // DescribeVpnConnections to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error { func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error {
return c.WaitUntilVpnConnectionAvailableWithContext(aws.BackgroundContext(), input) return c.WaitUntilVpnConnectionAvailableWithContext(aws.BackgroundContext(), input)
@@ -1566,7 +1571,7 @@ func (c *EC2) WaitUntilVpnConnectionAvailableWithContext(ctx aws.Context, input
// WaitUntilVpnConnectionDeleted uses the Amazon EC2 API operation // WaitUntilVpnConnectionDeleted uses the Amazon EC2 API operation
// DescribeVpnConnections to wait for a condition to be met before returning. // DescribeVpnConnections to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error { func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error {
return c.WaitUntilVpnConnectionDeletedWithContext(aws.BackgroundContext(), input) return c.WaitUntilVpnConnectionDeletedWithContext(aws.BackgroundContext(), input)

File diff suppressed because it is too large Load Diff

View File

@@ -64,69 +64,17 @@
// //
// Using the Client // Using the Client
// //
// To use the client for AWS Identity and Access Management you will first need // To AWS Identity and Access Management with the SDK use the New function to create
// to create a new instance of it. // a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
// //
// When creating a client for an AWS service you'll first need to have a Session // See the SDK's documentation for more information on how to use the SDK.
// already created. The Session provides configuration that can be shared
// between multiple service clients. Additional configuration can be applied to
// the Session and service's client when they are constructed. The aws package's
// Config type contains several fields such as Region for the AWS Region the
// client should make API requests too. The optional Config value can be provided
// as the variadic argument for Sessions and client creation.
//
// Once the service's client is created you can use it to make API requests the
// AWS service. These clients are safe to use concurrently.
//
// // Create a session to share configuration, and load external configuration.
// sess := session.Must(session.NewSession())
//
// // Create the service's client with the session.
// svc := iam.New(sess)
//
// See the SDK's documentation for more information on how to use service clients.
// https://docs.aws.amazon.com/sdk-for-go/api/ // https://docs.aws.amazon.com/sdk-for-go/api/
// //
// See aws package's Config type for more information on configuration options. // See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
// //
// See the AWS Identity and Access Management client IAM for more // See the AWS Identity and Access Management client IAM for more
// information on creating the service's client. // information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/iam/#New // https://docs.aws.amazon.com/sdk-for-go/api/service/iam/#New
//
// Once the client is created you can make an API request to the service.
// Each API method takes a input parameter, and returns the service response
// and an error.
//
// The API method will document which error codes the service can be returned
// by the operation if the service models the API operation's errors. These
// errors will also be available as const strings prefixed with "ErrCode".
//
// result, err := svc.AddClientIDToOpenIDConnectProvider(params)
// if err != nil {
// // Cast err to awserr.Error to handle specific error codes.
// aerr, ok := err.(awserr.Error)
// if ok && aerr.Code() == <error code to check for> {
// // Specific error code handling
// }
// return err
// }
//
// fmt.Println("AddClientIDToOpenIDConnectProvider result:")
// fmt.Println(result)
//
// Using the Client with Context
//
// The service's client also provides methods to make API requests with a Context
// value. This allows you to control the timeout, and cancellation of pending
// requests. These methods also take request Option as variadic parameter to apply
// additional configuration to the API request.
//
// ctx := context.Background()
//
// result, err := svc.AddClientIDToOpenIDConnectProviderWithContext(ctx, params)
//
// See the request package documentation for more information on using Context pattern
// with the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
package iam package iam

View File

@@ -11,7 +11,7 @@ import (
// WaitUntilInstanceProfileExists uses the IAM API operation // WaitUntilInstanceProfileExists uses the IAM API operation
// GetInstanceProfile to wait for a condition to be met before returning. // GetInstanceProfile to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *IAM) WaitUntilInstanceProfileExists(input *GetInstanceProfileInput) error { func (c *IAM) WaitUntilInstanceProfileExists(input *GetInstanceProfileInput) error {
return c.WaitUntilInstanceProfileExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilInstanceProfileExistsWithContext(aws.BackgroundContext(), input)
@@ -62,7 +62,7 @@ func (c *IAM) WaitUntilInstanceProfileExistsWithContext(ctx aws.Context, input *
// WaitUntilUserExists uses the IAM API operation // WaitUntilUserExists uses the IAM API operation
// GetUser to wait for a condition to be met before returning. // GetUser to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *IAM) WaitUntilUserExists(input *GetUserInput) error { func (c *IAM) WaitUntilUserExists(input *GetUserInput) error {
return c.WaitUntilUserExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilUserExistsWithContext(aws.BackgroundContext(), input)

File diff suppressed because it is too large Load Diff

View File

@@ -44,3 +44,21 @@ func defaultInitRequestFn(r *request.Request) {
r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError) r.Handlers.Unmarshal.PushFront(copyMultipartStatusOKUnmarhsalError)
} }
} }
// bucketGetter is an accessor interface to grab the "Bucket" field from
// an S3 type.
type bucketGetter interface {
getBucket() string
}
// sseCustomerKeyGetter is an accessor interface to grab the "SSECustomerKey"
// field from an S3 type.
type sseCustomerKeyGetter interface {
getSSECustomerKey() string
}
// copySourceSSECustomerKeyGetter is an accessor interface to grab the
// "CopySourceSSECustomerKey" field from an S3 type.
type copySourceSSECustomerKeyGetter interface {
getCopySourceSSECustomerKey() string
}

View File

@@ -10,69 +10,17 @@
// //
// Using the Client // Using the Client
// //
// To use the client for Amazon Simple Storage Service you will first need // To Amazon Simple Storage Service with the SDK use the New function to create
// to create a new instance of it. // a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
// //
// When creating a client for an AWS service you'll first need to have a Session // See the SDK's documentation for more information on how to use the SDK.
// already created. The Session provides configuration that can be shared
// between multiple service clients. Additional configuration can be applied to
// the Session and service's client when they are constructed. The aws package's
// Config type contains several fields such as Region for the AWS Region the
// client should make API requests too. The optional Config value can be provided
// as the variadic argument for Sessions and client creation.
//
// Once the service's client is created you can use it to make API requests the
// AWS service. These clients are safe to use concurrently.
//
// // Create a session to share configuration, and load external configuration.
// sess := session.Must(session.NewSession())
//
// // Create the service's client with the session.
// svc := s3.New(sess)
//
// See the SDK's documentation for more information on how to use service clients.
// https://docs.aws.amazon.com/sdk-for-go/api/ // https://docs.aws.amazon.com/sdk-for-go/api/
// //
// See aws package's Config type for more information on configuration options. // See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
// //
// See the Amazon Simple Storage Service client S3 for more // See the Amazon Simple Storage Service client S3 for more
// information on creating the service's client. // information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New // https://docs.aws.amazon.com/sdk-for-go/api/service/s3/#New
//
// Once the client is created you can make an API request to the service.
// Each API method takes a input parameter, and returns the service response
// and an error.
//
// The API method will document which error codes the service can be returned
// by the operation if the service models the API operation's errors. These
// errors will also be available as const strings prefixed with "ErrCode".
//
// result, err := svc.AbortMultipartUpload(params)
// if err != nil {
// // Cast err to awserr.Error to handle specific error codes.
// aerr, ok := err.(awserr.Error)
// if ok && aerr.Code() == <error code to check for> {
// // Specific error code handling
// }
// return err
// }
//
// fmt.Println("AbortMultipartUpload result:")
// fmt.Println(result)
//
// Using the Client with Context
//
// The service's client also provides methods to make API requests with a Context
// value. This allows you to control the timeout, and cancellation of pending
// requests. These methods also take request Option as variadic parameter to apply
// additional configuration to the API request.
//
// ctx := context.Background()
//
// result, err := svc.AbortMultipartUploadWithContext(ctx, params)
//
// See the request package documentation for more information on using Context pattern
// with the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
package s3 package s3

View File

@@ -8,7 +8,6 @@ import (
"github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/request"
) )
@@ -113,15 +112,9 @@ func updateEndpointForAccelerate(r *request.Request) {
// Attempts to retrieve the bucket name from the request input parameters. // Attempts to retrieve the bucket name from the request input parameters.
// If no bucket is found, or the field is empty "", false will be returned. // If no bucket is found, or the field is empty "", false will be returned.
func bucketNameFromReqParams(params interface{}) (string, bool) { func bucketNameFromReqParams(params interface{}) (string, bool) {
b, _ := awsutil.ValuesAtPath(params, "Bucket") if iface, ok := params.(bucketGetter); ok {
if len(b) == 0 { b := iface.getBucket()
return "", false return b, len(b) > 0
}
if bucket, ok := b[0].(*string); ok {
if bucketStr := aws.StringValue(bucket); bucketStr != "" {
return bucketStr, true
}
} }
return "", false return "", false

View File

@@ -5,17 +5,27 @@ import (
"encoding/base64" "encoding/base64"
"github.com/aws/aws-sdk-go/aws/awserr" "github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/aws/request" "github.com/aws/aws-sdk-go/aws/request"
) )
var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil) var errSSERequiresSSL = awserr.New("ConfigError", "cannot send SSE keys over HTTP.", nil)
func validateSSERequiresSSL(r *request.Request) { func validateSSERequiresSSL(r *request.Request) {
if r.HTTPRequest.URL.Scheme != "https" { if r.HTTPRequest.URL.Scheme == "https" {
p, _ := awsutil.ValuesAtPath(r.Params, "SSECustomerKey||CopySourceSSECustomerKey") return
if len(p) > 0 { }
if iface, ok := r.Params.(sseCustomerKeyGetter); ok {
if len(iface.getSSECustomerKey()) > 0 {
r.Error = errSSERequiresSSL r.Error = errSSERequiresSSL
return
}
}
if iface, ok := r.Params.(copySourceSSECustomerKeyGetter); ok {
if len(iface.getCopySourceSSECustomerKey()) > 0 {
r.Error = errSSERequiresSSL
return
} }
} }
} }

View File

@@ -11,7 +11,7 @@ import (
// WaitUntilBucketExists uses the Amazon S3 API operation // WaitUntilBucketExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning. // HeadBucket to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error { func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilBucketExistsWithContext(aws.BackgroundContext(), input)
@@ -72,7 +72,7 @@ func (c *S3) WaitUntilBucketExistsWithContext(ctx aws.Context, input *HeadBucket
// WaitUntilBucketNotExists uses the Amazon S3 API operation // WaitUntilBucketNotExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning. // HeadBucket to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error { func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilBucketNotExistsWithContext(aws.BackgroundContext(), input)
@@ -118,7 +118,7 @@ func (c *S3) WaitUntilBucketNotExistsWithContext(ctx aws.Context, input *HeadBuc
// WaitUntilObjectExists uses the Amazon S3 API operation // WaitUntilObjectExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning. // HeadObject to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error { func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilObjectExistsWithContext(aws.BackgroundContext(), input)
@@ -169,7 +169,7 @@ func (c *S3) WaitUntilObjectExistsWithContext(ctx aws.Context, input *HeadObject
// WaitUntilObjectNotExists uses the Amazon S3 API operation // WaitUntilObjectNotExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning. // HeadObject to wait for a condition to be met before returning.
// If the condition is not meet within the max attempt window an error will // If the condition is not met within the max attempt window, an error will
// be returned. // be returned.
func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error { func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input) return c.WaitUntilObjectNotExistsWithContext(aws.BackgroundContext(), input)

View File

@@ -14,19 +14,18 @@ const opAssumeRole = "AssumeRole"
// AssumeRoleRequest generates a "aws/request.Request" representing the // AssumeRoleRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRole operation. The "output" return // client's request for the AssumeRole operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See AssumeRole for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See AssumeRole for more information on using the AssumeRole
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the AssumeRole method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the AssumeRoleRequest method. // // Example sending a request using the AssumeRoleRequest method.
// req, resp := client.AssumeRoleRequest(params) // req, resp := client.AssumeRoleRequest(params)
@@ -195,19 +194,18 @@ const opAssumeRoleWithSAML = "AssumeRoleWithSAML"
// AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the // AssumeRoleWithSAMLRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithSAML operation. The "output" return // client's request for the AssumeRoleWithSAML operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See AssumeRoleWithSAML for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See AssumeRoleWithSAML for more information on using the AssumeRoleWithSAML
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the AssumeRoleWithSAML method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the AssumeRoleWithSAMLRequest method. // // Example sending a request using the AssumeRoleWithSAMLRequest method.
// req, resp := client.AssumeRoleWithSAMLRequest(params) // req, resp := client.AssumeRoleWithSAMLRequest(params)
@@ -369,19 +367,18 @@ const opAssumeRoleWithWebIdentity = "AssumeRoleWithWebIdentity"
// AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the // AssumeRoleWithWebIdentityRequest generates a "aws/request.Request" representing the
// client's request for the AssumeRoleWithWebIdentity operation. The "output" return // client's request for the AssumeRoleWithWebIdentity operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See AssumeRoleWithWebIdentity for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See AssumeRoleWithWebIdentity for more information on using the AssumeRoleWithWebIdentity
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the AssumeRoleWithWebIdentity method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the AssumeRoleWithWebIdentityRequest method. // // Example sending a request using the AssumeRoleWithWebIdentityRequest method.
// req, resp := client.AssumeRoleWithWebIdentityRequest(params) // req, resp := client.AssumeRoleWithWebIdentityRequest(params)
@@ -572,19 +569,18 @@ const opDecodeAuthorizationMessage = "DecodeAuthorizationMessage"
// DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the // DecodeAuthorizationMessageRequest generates a "aws/request.Request" representing the
// client's request for the DecodeAuthorizationMessage operation. The "output" return // client's request for the DecodeAuthorizationMessage operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See DecodeAuthorizationMessage for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See DecodeAuthorizationMessage for more information on using the DecodeAuthorizationMessage
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the DecodeAuthorizationMessage method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the DecodeAuthorizationMessageRequest method. // // Example sending a request using the DecodeAuthorizationMessageRequest method.
// req, resp := client.DecodeAuthorizationMessageRequest(params) // req, resp := client.DecodeAuthorizationMessageRequest(params)
@@ -685,19 +681,18 @@ const opGetCallerIdentity = "GetCallerIdentity"
// GetCallerIdentityRequest generates a "aws/request.Request" representing the // GetCallerIdentityRequest generates a "aws/request.Request" representing the
// client's request for the GetCallerIdentity operation. The "output" return // client's request for the GetCallerIdentity operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See GetCallerIdentity for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See GetCallerIdentity for more information on using the GetCallerIdentity
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the GetCallerIdentity method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the GetCallerIdentityRequest method. // // Example sending a request using the GetCallerIdentityRequest method.
// req, resp := client.GetCallerIdentityRequest(params) // req, resp := client.GetCallerIdentityRequest(params)
@@ -761,19 +756,18 @@ const opGetFederationToken = "GetFederationToken"
// GetFederationTokenRequest generates a "aws/request.Request" representing the // GetFederationTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetFederationToken operation. The "output" return // client's request for the GetFederationToken operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See GetFederationToken for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See GetFederationToken for more information on using the GetFederationToken
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the GetFederationToken method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the GetFederationTokenRequest method. // // Example sending a request using the GetFederationTokenRequest method.
// req, resp := client.GetFederationTokenRequest(params) // req, resp := client.GetFederationTokenRequest(params)
@@ -931,19 +925,18 @@ const opGetSessionToken = "GetSessionToken"
// GetSessionTokenRequest generates a "aws/request.Request" representing the // GetSessionTokenRequest generates a "aws/request.Request" representing the
// client's request for the GetSessionToken operation. The "output" return // client's request for the GetSessionToken operation. The "output" return
// value can be used to capture response data after the request's "Send" method // value will be populated with the request's response once the request complets
// is called. // successfuly.
// //
// See GetSessionToken for usage and error information. // Use "Send" method on the returned Request to send the API call to the service.
// the "output" return value is not valid until after Send returns without error.
// //
// Creating a request object using this method should be used when you want to inject // See GetSessionToken for more information on using the GetSessionToken
// custom logic into the request's lifecycle using a custom handler, or if you want to // API call, and error handling.
// access properties on the request object before or after sending the request. If //
// you just want the service response, call the GetSessionToken method directly // This method is useful when you want to inject custom logic or configuration
// instead. // into the SDK's request lifecycle. Such as custom headers, or retry logic.
// //
// Note: You must call the "Send" method on the returned request object in order
// to execute the request.
// //
// // Example sending a request using the GetSessionTokenRequest method. // // Example sending a request using the GetSessionTokenRequest method.
// req, resp := client.GetSessionTokenRequest(params) // req, resp := client.GetSessionTokenRequest(params)

View File

@@ -56,69 +56,17 @@
// //
// Using the Client // Using the Client
// //
// To use the client for AWS Security Token Service you will first need // To AWS Security Token Service with the SDK use the New function to create
// to create a new instance of it. // a new service client. With that client you can make API requests to the service.
// These clients are safe to use concurrently.
// //
// When creating a client for an AWS service you'll first need to have a Session // See the SDK's documentation for more information on how to use the SDK.
// already created. The Session provides configuration that can be shared
// between multiple service clients. Additional configuration can be applied to
// the Session and service's client when they are constructed. The aws package's
// Config type contains several fields such as Region for the AWS Region the
// client should make API requests too. The optional Config value can be provided
// as the variadic argument for Sessions and client creation.
//
// Once the service's client is created you can use it to make API requests the
// AWS service. These clients are safe to use concurrently.
//
// // Create a session to share configuration, and load external configuration.
// sess := session.Must(session.NewSession())
//
// // Create the service's client with the session.
// svc := sts.New(sess)
//
// See the SDK's documentation for more information on how to use service clients.
// https://docs.aws.amazon.com/sdk-for-go/api/ // https://docs.aws.amazon.com/sdk-for-go/api/
// //
// See aws package's Config type for more information on configuration options. // See aws.Config documentation for more information on configuring SDK clients.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config // https://docs.aws.amazon.com/sdk-for-go/api/aws/#Config
// //
// See the AWS Security Token Service client STS for more // See the AWS Security Token Service client STS for more
// information on creating the service's client. // information on creating client for this service.
// https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New // https://docs.aws.amazon.com/sdk-for-go/api/service/sts/#New
//
// Once the client is created you can make an API request to the service.
// Each API method takes a input parameter, and returns the service response
// and an error.
//
// The API method will document which error codes the service can be returned
// by the operation if the service models the API operation's errors. These
// errors will also be available as const strings prefixed with "ErrCode".
//
// result, err := svc.AssumeRole(params)
// if err != nil {
// // Cast err to awserr.Error to handle specific error codes.
// aerr, ok := err.(awserr.Error)
// if ok && aerr.Code() == <error code to check for> {
// // Specific error code handling
// }
// return err
// }
//
// fmt.Println("AssumeRole result:")
// fmt.Println(result)
//
// Using the Client with Context
//
// The service's client also provides methods to make API requests with a Context
// value. This allows you to control the timeout, and cancellation of pending
// requests. These methods also take request Option as variadic parameter to apply
// additional configuration to the API request.
//
// ctx := context.Background()
//
// result, err := svc.AssumeRoleWithContext(ctx, params)
//
// See the request package documentation for more information on using Context pattern
// with the SDK.
// https://docs.aws.amazon.com/sdk-for-go/api/aws/request/
package sts package sts

View File

@@ -52,6 +52,9 @@ type TokenKeyType string
// TokenAppType - Circonus API Token app name // TokenAppType - Circonus API Token app name
type TokenAppType string type TokenAppType string
// TokenAccountIDType - Circonus API Token account id
type TokenAccountIDType string
// CIDType Circonus object cid // CIDType Circonus object cid
type CIDType *string type CIDType *string
@@ -72,12 +75,13 @@ type TagType []string
// Config options for Circonus API // Config options for Circonus API
type Config struct { type Config struct {
URL string URL string
TokenKey string TokenKey string
TokenApp string TokenApp string
CACert *x509.CertPool TokenAccountID string
Log *log.Logger CACert *x509.CertPool
Debug bool Log *log.Logger
Debug bool
} }
// API Circonus API // API Circonus API
@@ -85,6 +89,7 @@ type API struct {
apiURL *url.URL apiURL *url.URL
key TokenKeyType key TokenKeyType
app TokenAppType app TokenAppType
accountID TokenAccountIDType
caCert *x509.CertPool caCert *x509.CertPool
Debug bool Debug bool
Log *log.Logger Log *log.Logger
@@ -119,6 +124,8 @@ func New(ac *Config) (*API, error) {
app = defaultAPIApp app = defaultAPIApp
} }
acctID := TokenAccountIDType(ac.TokenAccountID)
au := string(ac.URL) au := string(ac.URL)
if au == "" { if au == "" {
au = defaultAPIURL au = defaultAPIURL
@@ -137,12 +144,13 @@ func New(ac *Config) (*API, error) {
} }
a := &API{ a := &API{
apiURL: apiURL, apiURL: apiURL,
key: key, key: key,
app: app, app: app,
caCert: ac.CACert, accountID: acctID,
Debug: ac.Debug, caCert: ac.CACert,
Log: ac.Log, Debug: ac.Debug,
Log: ac.Log,
useExponentialBackoff: false, useExponentialBackoff: false,
} }
@@ -291,6 +299,9 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er
req.Header.Add("Accept", "application/json") req.Header.Add("Accept", "application/json")
req.Header.Add("X-Circonus-Auth-Token", string(a.key)) req.Header.Add("X-Circonus-Auth-Token", string(a.key))
req.Header.Add("X-Circonus-App-Name", string(a.app)) req.Header.Add("X-Circonus-App-Name", string(a.app))
if string(a.accountID) != "" {
req.Header.Add("X-Circonus-Account-ID", string(a.accountID))
}
client := retryablehttp.NewClient() client := retryablehttp.NewClient()
if a.apiURL.Scheme == "https" && a.caCert != nil { if a.apiURL.Scheme == "https" && a.caCert != nil {

View File

@@ -138,8 +138,20 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) {
// Verify broker supports the check type to be used // Verify broker supports the check type to be used
func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details *api.BrokerDetail) bool { func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details *api.BrokerDetail) bool {
baseType := string(checkType)
for _, module := range details.Modules { for _, module := range details.Modules {
if CheckTypeType(module) == checkType { if module == baseType {
return true
}
}
if idx := strings.Index(baseType, ":"); idx > 0 {
baseType = baseType[0:idx]
}
for _, module := range details.Modules {
if module == baseType {
return true return true
} }
} }

View File

@@ -307,12 +307,9 @@ func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error)
return nil, nil, err return nil, nil, err
} }
config := &api.CheckBundle{ chkcfg := &api.CheckBundle{
Brokers: []string{broker.CID}, Brokers: []string{broker.CID},
Config: map[config.Key]string{ Config: make(map[config.Key]string),
config.AsyncMetrics: "true",
config.Secret: checkSecret,
},
DisplayName: string(cm.checkDisplayName), DisplayName: string(cm.checkDisplayName),
Metrics: []api.CheckBundleMetric{}, Metrics: []api.CheckBundleMetric{},
MetricLimit: config.DefaultCheckBundleMetricLimit, MetricLimit: config.DefaultCheckBundleMetricLimit,
@@ -325,7 +322,24 @@ func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error)
Type: string(cm.checkType), Type: string(cm.checkType),
} }
checkBundle, err := cm.apih.CreateCheckBundle(config) if len(cm.customConfigFields) > 0 {
for fld, val := range cm.customConfigFields {
chkcfg.Config[config.Key(fld)] = val
}
}
//
// use the default config settings if these are NOT set by user configuration
//
if val, ok := chkcfg.Config[config.AsyncMetrics]; !ok || val == "" {
chkcfg.Config[config.AsyncMetrics] = "true"
}
if val, ok := chkcfg.Config[config.Secret]; !ok || val == "" {
chkcfg.Config[config.Secret] = checkSecret
}
checkBundle, err := cm.apih.CreateCheckBundle(chkcfg)
if err != nil { if err != nil {
return nil, nil, err return nil, nil, err
} }

View File

@@ -85,6 +85,10 @@ type CheckConfig struct {
// overrides the behavior and will re-activate the metric when it is // overrides the behavior and will re-activate the metric when it is
// encountered. "(true|false)", default "false" // encountered. "(true|false)", default "false"
ForceMetricActivation string ForceMetricActivation string
// Type of check to use (default: httptrap)
Type string
// Custom check config fields (default: none)
CustomConfigFields map[string]string
} }
// BrokerConfig options for broker // BrokerConfig options for broker
@@ -151,6 +155,7 @@ type CheckManager struct {
checkSearchTag api.TagType checkSearchTag api.TagType
checkSecret CheckSecretType checkSecret CheckSecretType
checkTags api.TagType checkTags api.TagType
customConfigFields map[string]string
checkSubmissionURL api.URLType checkSubmissionURL api.URLType
checkDisplayName CheckDisplayNameType checkDisplayName CheckDisplayNameType
forceMetricActivation bool forceMetricActivation bool
@@ -233,7 +238,11 @@ func New(cfg *Config) (*CheckManager, error) {
} }
// initialize check related data // initialize check related data
cm.checkType = defaultCheckType if cfg.Check.Type != "" {
cm.checkType = CheckTypeType(cfg.Check.Type)
} else {
cm.checkType = defaultCheckType
}
idSetting := "0" idSetting := "0"
if cfg.Check.ID != "" { if cfg.Check.ID != "" {
@@ -285,6 +294,13 @@ func New(cfg *Config) (*CheckManager, error) {
cm.checkTags = strings.Split(strings.Replace(cfg.Check.Tags, " ", "", -1), ",") cm.checkTags = strings.Split(strings.Replace(cfg.Check.Tags, " ", "", -1), ",")
} }
cm.customConfigFields = make(map[string]string)
if len(cfg.Check.CustomConfigFields) > 0 {
for fld, val := range cfg.Check.CustomConfigFields {
cm.customConfigFields[fld] = val
}
}
dur := cfg.Check.MaxURLAge dur := cfg.Check.MaxURLAge
if dur == "" { if dur == "" {
dur = defaultTrapMaxURLAge dur = defaultTrapMaxURLAge

View File

@@ -254,7 +254,7 @@ func (m *CirconusMetrics) Flush() {
} }
if send { if send {
output[name] = map[string]interface{}{ output[name] = map[string]interface{}{
"_type": "n", "_type": "L",
"_value": value, "_value": value,
} }
} }

View File

@@ -4,6 +4,8 @@
package circonusgometrics package circonusgometrics
import "fmt"
// A Counter is a monotonically increasing unsigned integer. // A Counter is a monotonically increasing unsigned integer.
// //
// Use a counter to derive rates (e.g., record total number of requests, derive // Use a counter to derive rates (e.g., record total number of requests, derive
@@ -40,6 +42,19 @@ func (m *CirconusMetrics) RemoveCounter(metric string) {
delete(m.counters, metric) delete(m.counters, metric)
} }
// GetCounterTest returns the current value for a counter. (note: it is a function specifically for "testing", disable automatic submission during testing.)
func (m *CirconusMetrics) GetCounterTest(metric string) (uint64, error) {
m.cm.Lock()
defer m.cm.Unlock()
if val, ok := m.counters[metric]; ok {
return val, nil
}
return 0, fmt.Errorf("Counter metric '%s' not found", metric)
}
// SetCounterFunc set counter to a function [called at flush interval] // SetCounterFunc set counter to a function [called at flush interval]
func (m *CirconusMetrics) SetCounterFunc(metric string, fn func() uint64) { func (m *CirconusMetrics) SetCounterFunc(metric string, fn func() uint64) {
m.cfm.Lock() m.cfm.Lock()

View File

@@ -32,6 +32,18 @@ func (m *CirconusMetrics) RemoveGauge(metric string) {
delete(m.gauges, metric) delete(m.gauges, metric)
} }
// GetGaugeTest returns the current value for a gauge. (note: it is a function specifically for "testing", disable automatic submission during testing.)
func (m *CirconusMetrics) GetGaugeTest(metric string) (string, error) {
m.gm.Lock()
defer m.gm.Unlock()
if val, ok := m.gauges[metric]; ok {
return val, nil
}
return "", fmt.Errorf("Gauge metric '%s' not found", metric)
}
// SetGaugeFunc sets a gauge to a function [called at flush interval] // SetGaugeFunc sets a gauge to a function [called at flush interval]
func (m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) { func (m *CirconusMetrics) SetGaugeFunc(metric string, fn func() int64) {
m.gfm.Lock() m.gfm.Lock()

View File

@@ -5,6 +5,7 @@
package circonusgometrics package circonusgometrics
import ( import (
"fmt"
"sync" "sync"
"github.com/circonus-labs/circonusllhist" "github.com/circonus-labs/circonusllhist"
@@ -38,6 +39,18 @@ func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) {
m.hm.Unlock() m.hm.Unlock()
} }
// GetHistogramTest returns the current value for a gauge. (note: it is a function specifically for "testing", disable automatic submission during testing.)
func (m *CirconusMetrics) GetHistogramTest(metric string) ([]string, error) {
m.hm.Lock()
defer m.hm.Unlock()
if hist, ok := m.histograms[metric]; ok {
return hist.hist.DecStrings(), nil
}
return []string{""}, fmt.Errorf("Histogram metric '%s' not found", metric)
}
// RemoveHistogram removes a histogram // RemoveHistogram removes a histogram
func (m *CirconusMetrics) RemoveHistogram(metric string) { func (m *CirconusMetrics) RemoveHistogram(metric string) {
m.hm.Lock() m.hm.Lock()

View File

@@ -11,15 +11,15 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
// implied. See the License for the specific language governing // implied. See the License for the specific language governing
// permissions and limitations under the License. // permissions and limitations under the License.
//
// Author: Andrei Matei (andrei@cockroachlabs.com)
// Package crdb provides helpers for using CockroachDB in client // Package crdb provides helpers for using CockroachDB in client
// applications. // applications.
package crdb package crdb
import ( import (
"context"
"database/sql" "database/sql"
"fmt"
"github.com/lib/pq" "github.com/lib/pq"
) )
@@ -39,22 +39,22 @@ type AmbiguousCommitError struct {
// In that case, we return AmbiguousCommitError. // In that case, we return AmbiguousCommitError.
// //
// For more information about CockroachDB's transaction model see // For more information about CockroachDB's transaction model see
// https://cockroachlabs.com/docs/transactions.html. // https://cockroachlabs.com/docs/stable/transactions.html.
// //
// NOTE: the supplied exec closure should not have external side // NOTE: the supplied exec closure should not have external side
// effects beyond changes to the database. // effects beyond changes to the database.
func ExecuteTx(db *sql.DB, fn func(*sql.Tx) error) (err error) { func ExecuteTx(ctx context.Context, db *sql.DB, txopts *sql.TxOptions, fn func(*sql.Tx) error) error {
// Start a transaction. // Start a transaction.
var tx *sql.Tx tx, err := db.BeginTx(ctx, txopts)
tx, err = db.Begin()
if err != nil { if err != nil {
return err return err
} }
return ExecuteInTx(tx, func() error { return fn(tx) }) return ExecuteInTx(ctx, tx, func() error { return fn(tx) })
} }
// Tx is used to permit clients to implement custom transaction logic.
type Tx interface { type Tx interface {
Exec(query string, args ...interface{}) (sql.Result, error) ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
Commit() error Commit() error
Rollback() error Rollback() error
} }
@@ -64,7 +64,7 @@ type Tx interface {
// ExecuteInTx will only retry statements that are performed within the supplied // ExecuteInTx will only retry statements that are performed within the supplied
// closure (fn). Any statements performed on the tx before ExecuteInTx is invoked will *not* // closure (fn). Any statements performed on the tx before ExecuteInTx is invoked will *not*
// be re-run if the transaction needs to be retried. // be re-run if the transaction needs to be retried.
func ExecuteInTx(tx Tx, fn func() error) (err error) { func ExecuteInTx(ctx context.Context, tx Tx, fn func() error) (err error) {
defer func() { defer func() {
if err == nil { if err == nil {
// Ignore commit errors. The tx has already been committed by RELEASE. // Ignore commit errors. The tx has already been committed by RELEASE.
@@ -77,7 +77,7 @@ func ExecuteInTx(tx Tx, fn func() error) (err error) {
}() }()
// Specify that we intend to retry this txn in case of CockroachDB retryable // Specify that we intend to retry this txn in case of CockroachDB retryable
// errors. // errors.
if _, err = tx.Exec("SAVEPOINT cockroach_restart"); err != nil { if _, err = tx.ExecContext(ctx, "SAVEPOINT cockroach_restart"); err != nil {
return err return err
} }
@@ -88,7 +88,7 @@ func ExecuteInTx(tx Tx, fn func() error) (err error) {
// RELEASE acts like COMMIT in CockroachDB. We use it since it gives us an // RELEASE acts like COMMIT in CockroachDB. We use it since it gives us an
// opportunity to react to retryable errors, whereas tx.Commit() doesn't. // opportunity to react to retryable errors, whereas tx.Commit() doesn't.
released = true released = true
if _, err = tx.Exec("RELEASE SAVEPOINT cockroach_restart"); err == nil { if _, err = tx.ExecContext(ctx, "RELEASE SAVEPOINT cockroach_restart"); err == nil {
return nil return nil
} }
} }
@@ -103,8 +103,20 @@ func ExecuteInTx(tx Tx, fn func() error) (err error) {
} }
return err return err
} }
if _, err = tx.Exec("ROLLBACK TO SAVEPOINT cockroach_restart"); err != nil { if _, err = tx.ExecContext(ctx, "ROLLBACK TO SAVEPOINT cockroach_restart"); err != nil {
return err // ROLLBACK TO SAVEPOINT failed. If it failed with a lib/pq error, we want
// to pass this error to the client, but also include the original error
// message and code. So, we'll do some surgery on lib/pq errors in
// particular.
// If it failed with any other error (e.g. the "driver: bad connection" is
// untyped), we overwrite the error.
msgPattern := "restarting txn failed. ROLLBACK TO SAVEPOINT encountered error: %s. " +
"Original error (code: %s): %s."
if rollbackPQErr, ok := err.(*pq.Error); ok {
rollbackPQErr.Message = fmt.Sprintf(msgPattern, rollbackPQErr, pqErr.Code, pqErr)
return rollbackPQErr
}
return fmt.Errorf(msgPattern, err, pqErr.Code, pqErr)
} }
} }
} }

View File

@@ -1,6 +1,6 @@
# etcd/clientv3 # etcd/clientv3
[![Godoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3) [![Godoc](https://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](https://godoc.org/github.com/coreos/etcd/clientv3)
`etcd/clientv3` is the official Go etcd client for v3. `etcd/clientv3` is the official Go etcd client for v3.

View File

@@ -52,11 +52,9 @@ type Client struct {
conn *grpc.ClientConn conn *grpc.ClientConn
dialerrc chan error dialerrc chan error
cfg Config cfg Config
creds *credentials.TransportCredentials creds *credentials.TransportCredentials
balancer *simpleBalancer balancer *simpleBalancer
retryWrapper retryRpcFunc
retryAuthWrapper retryRpcFunc
ctx context.Context ctx context.Context
cancel context.CancelFunc cancel context.CancelFunc
@@ -387,8 +385,6 @@ func newClient(cfg *Config) (*Client, error) {
return nil, err return nil, err
} }
client.conn = conn client.conn = conn
client.retryWrapper = client.newRetryWrapper()
client.retryAuthWrapper = client.newAuthRetryWrapper()
// wait for a connection // wait for a connection
if cfg.DialTimeout > 0 { if cfg.DialTimeout > 0 {
@@ -510,7 +506,6 @@ func toErr(ctx context.Context, err error) error {
err = ctx.Err() err = ctx.Err()
} }
case codes.Unavailable: case codes.Unavailable:
err = ErrNoAvailableEndpoints
case codes.FailedPrecondition: case codes.FailedPrecondition:
err = grpc.ErrClientConnClosing err = grpc.ErrClientConnClosing
} }

View File

@@ -60,6 +60,8 @@ func Compare(cmp Cmp, result string, v interface{}) Cmp {
cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)} cmp.TargetUnion = &pb.Compare_CreateRevision{CreateRevision: mustInt64(v)}
case pb.Compare_MOD: case pb.Compare_MOD:
cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)} cmp.TargetUnion = &pb.Compare_ModRevision{ModRevision: mustInt64(v)}
case pb.Compare_LEASE:
cmp.TargetUnion = &pb.Compare_Lease{Lease: mustInt64(v)}
default: default:
panic("Unknown compare type") panic("Unknown compare type")
} }

View File

@@ -193,11 +193,12 @@ func (rs readSet) add(keys []string, txnresp *v3.TxnResponse) {
} }
} }
// first returns the store revision from the first fetch
func (rs readSet) first() int64 { func (rs readSet) first() int64 {
ret := int64(math.MaxInt64 - 1) ret := int64(math.MaxInt64 - 1)
for _, resp := range rs { for _, resp := range rs {
if len(resp.Kvs) > 0 && resp.Kvs[0].ModRevision < ret { if rev := resp.Header.Revision; rev < ret {
ret = resp.Kvs[0].ModRevision ret = rev
} }
} }
return ret return ret

View File

@@ -74,6 +74,19 @@ func (op OpResponse) Get() *GetResponse { return op.get }
func (op OpResponse) Del() *DeleteResponse { return op.del } func (op OpResponse) Del() *DeleteResponse { return op.del }
func (op OpResponse) Txn() *TxnResponse { return op.txn } func (op OpResponse) Txn() *TxnResponse { return op.txn }
func (resp *PutResponse) OpResponse() OpResponse {
return OpResponse{put: resp}
}
func (resp *GetResponse) OpResponse() OpResponse {
return OpResponse{get: resp}
}
func (resp *DeleteResponse) OpResponse() OpResponse {
return OpResponse{del: resp}
}
func (resp *TxnResponse) OpResponse() OpResponse {
return OpResponse{txn: resp}
}
type kv struct { type kv struct {
remote pb.KVClient remote pb.KVClient
} }

View File

@@ -60,6 +60,18 @@ type LeaseTimeToLiveResponse struct {
Keys [][]byte `json:"keys"` Keys [][]byte `json:"keys"`
} }
// LeaseStatus represents a lease status.
type LeaseStatus struct {
ID LeaseID `json:"id"`
// TODO: TTL int64
}
// LeaseLeasesResponse is used to convert the protobuf lease list response.
type LeaseLeasesResponse struct {
*pb.ResponseHeader
Leases []LeaseStatus `json:"leases"`
}
const ( const (
// defaultTTL is the assumed lease TTL used for the first keepalive // defaultTTL is the assumed lease TTL used for the first keepalive
// deadline before the actual TTL is known to the client. // deadline before the actual TTL is known to the client.
@@ -98,6 +110,9 @@ type Lease interface {
// TimeToLive retrieves the lease information of the given lease ID. // TimeToLive retrieves the lease information of the given lease ID.
TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption) (*LeaseTimeToLiveResponse, error)
// Leases retrieves all leases.
Leases(ctx context.Context) (*LeaseLeasesResponse, error)
// KeepAlive keeps the given lease alive forever. // KeepAlive keeps the given lease alive forever.
KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error)
@@ -219,6 +234,22 @@ func (l *lessor) TimeToLive(ctx context.Context, id LeaseID, opts ...LeaseOption
} }
} }
func (l *lessor) Leases(ctx context.Context) (*LeaseLeasesResponse, error) {
for {
resp, err := l.remote.LeaseLeases(ctx, &pb.LeaseLeasesRequest{}, grpc.FailFast(false))
if err == nil {
leases := make([]LeaseStatus, len(resp.Leases))
for i := range resp.Leases {
leases[i] = LeaseStatus{ID: LeaseID(resp.Leases[i].ID)}
}
return &LeaseLeasesResponse{ResponseHeader: resp.GetHeader(), Leases: leases}, nil
}
if isHaltErr(ctx, err) {
return nil, toErr(ctx, err)
}
}
}
func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) { func (l *lessor) KeepAlive(ctx context.Context, id LeaseID) (<-chan *LeaseKeepAliveResponse, error) {
ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize) ch := make(chan *LeaseKeepAliveResponse, leaseResponseChSize)

View File

@@ -28,6 +28,7 @@ type (
AlarmResponse pb.AlarmResponse AlarmResponse pb.AlarmResponse
AlarmMember pb.AlarmMember AlarmMember pb.AlarmMember
StatusResponse pb.StatusResponse StatusResponse pb.StatusResponse
HashKVResponse pb.HashKVResponse
MoveLeaderResponse pb.MoveLeaderResponse MoveLeaderResponse pb.MoveLeaderResponse
) )
@@ -50,6 +51,11 @@ type Maintenance interface {
// Status gets the status of the endpoint. // Status gets the status of the endpoint.
Status(ctx context.Context, endpoint string) (*StatusResponse, error) Status(ctx context.Context, endpoint string) (*StatusResponse, error)
// HashKV returns a hash of the KV state at the time of the RPC.
// If revision is zero, the hash is computed on all keys. If the revision
// is non-zero, the hash is computed on all keys at or below the given revision.
HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error)
// Snapshot provides a reader for a snapshot of a backend. // Snapshot provides a reader for a snapshot of a backend.
Snapshot(ctx context.Context) (io.ReadCloser, error) Snapshot(ctx context.Context) (io.ReadCloser, error)
@@ -159,6 +165,19 @@ func (m *maintenance) Status(ctx context.Context, endpoint string) (*StatusRespo
return (*StatusResponse)(resp), nil return (*StatusResponse)(resp), nil
} }
func (m *maintenance) HashKV(ctx context.Context, endpoint string, rev int64) (*HashKVResponse, error) {
remote, cancel, err := m.dial(endpoint)
if err != nil {
return nil, toErr(ctx, err)
}
defer cancel()
resp, err := remote.HashKV(ctx, &pb.HashKVRequest{Revision: rev}, grpc.FailFast(false))
if err != nil {
return nil, toErr(ctx, err)
}
return (*HashKVResponse)(resp), nil
}
func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) { func (m *maintenance) Snapshot(ctx context.Context) (io.ReadCloser, error) {
ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false)) ss, err := m.remote.Snapshot(ctx, &pb.SnapshotRequest{}, grpc.FailFast(false))
if err != nil { if err != nil {

View File

@@ -89,6 +89,45 @@ func (op *Op) WithKeyBytes(key []byte) { op.key = key }
// RangeBytes returns the byte slice holding with the Op's range end, if any. // RangeBytes returns the byte slice holding with the Op's range end, if any.
func (op Op) RangeBytes() []byte { return op.end } func (op Op) RangeBytes() []byte { return op.end }
// Rev returns the requested revision, if any.
func (op Op) Rev() int64 { return op.rev }
// IsPut returns true iff the operation is a Put.
func (op Op) IsPut() bool { return op.t == tPut }
// IsGet returns true iff the operation is a Get.
func (op Op) IsGet() bool { return op.t == tRange }
// IsDelete returns true iff the operation is a Delete.
func (op Op) IsDelete() bool { return op.t == tDeleteRange }
// IsSerializable returns true if the serializable field is true.
func (op Op) IsSerializable() bool { return op.serializable == true }
// IsKeysOnly returns true if the keysonly field is true.
func (op Op) IsKeysOnly() bool { return op.keysOnly == true }
// IsCountOnly returns true if the countonly field is true.
func (op Op) IsCountOnly() bool { return op.countOnly == true }
// MinModRev returns if field is populated.
func (op Op) MinModRev() int64 { return op.minModRev }
// MaxModRev returns if field is populated.
func (op Op) MaxModRev() int64 { return op.maxModRev }
// MinCreateRev returns if field is populated.
func (op Op) MinCreateRev() int64 { return op.minCreateRev }
// MaxCreateRev returns if field is populated.
func (op Op) MaxCreateRev() int64 { return op.maxCreateRev }
// Limit returns if field is populated.
func (op Op) retLimit() int64 { return op.limit }
// Sort returns if field is populated.
func (op Op) retSort() bool { return op.sort != nil }
// WithRangeBytes sets the byte slice for the Op's range end. // WithRangeBytes sets the byte slice for the Op's range end.
func (op *Op) WithRangeBytes(end []byte) { op.end = end } func (op *Op) WithRangeBytes(end []byte) { op.end = end }

View File

@@ -24,26 +24,29 @@ import (
type rpcFunc func(ctx context.Context) error type rpcFunc func(ctx context.Context) error
type retryRpcFunc func(context.Context, rpcFunc) error type retryRpcFunc func(context.Context, rpcFunc) error
type retryStopErrFunc func(error) bool
func (c *Client) newRetryWrapper() retryRpcFunc { func isReadStopError(err error) bool {
eErr := rpctypes.Error(err)
// always stop retry on etcd errors
if _, ok := eErr.(rpctypes.EtcdError); ok {
return true
}
// only retry if unavailable
return grpc.Code(err) != codes.Unavailable
}
func isWriteStopError(err error) bool {
return grpc.Code(err) != codes.Unavailable ||
grpc.ErrorDesc(err) != "there is no address available"
}
func (c *Client) newRetryWrapper(isStop retryStopErrFunc) retryRpcFunc {
return func(rpcCtx context.Context, f rpcFunc) error { return func(rpcCtx context.Context, f rpcFunc) error {
for { for {
err := f(rpcCtx) if err := f(rpcCtx); err == nil || isStop(err) {
if err == nil {
return nil
}
eErr := rpctypes.Error(err)
// always stop retry on etcd errors
if _, ok := eErr.(rpctypes.EtcdError); ok {
return err return err
} }
// only retry if unavailable
if grpc.Code(err) != codes.Unavailable {
return err
}
select { select {
case <-c.balancer.ConnectNotify(): case <-c.balancer.ConnectNotify():
case <-rpcCtx.Done(): case <-rpcCtx.Done():
@@ -79,17 +82,24 @@ func (c *Client) newAuthRetryWrapper() retryRpcFunc {
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy. // RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
func RetryKVClient(c *Client) pb.KVClient { func RetryKVClient(c *Client) pb.KVClient {
retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper} readRetry := c.newRetryWrapper(isReadStopError)
return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}} writeRetry := c.newRetryWrapper(isWriteStopError)
conn := pb.NewKVClient(c.conn)
retryBasic := &retryKVClient{&retryWriteKVClient{conn, writeRetry}, readRetry}
retryAuthWrapper := c.newAuthRetryWrapper()
return &retryKVClient{
&retryWriteKVClient{retryBasic, retryAuthWrapper},
retryAuthWrapper}
} }
type retryKVClient struct { type retryKVClient struct {
*retryWriteKVClient *retryWriteKVClient
readRetry retryRpcFunc
} }
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) { func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
err = rkv.retryf(ctx, func(rctx context.Context) error { err = rkv.readRetry(ctx, func(rctx context.Context) error {
resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...) resp, err = rkv.KVClient.Range(rctx, in, opts...)
return err return err
}) })
return resp, err return resp, err
@@ -139,8 +149,11 @@ type retryLeaseClient struct {
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy. // RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
func RetryLeaseClient(c *Client) pb.LeaseClient { func RetryLeaseClient(c *Client) pb.LeaseClient {
retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper} retry := &retryLeaseClient{
return &retryLeaseClient{retry, c.retryAuthWrapper} pb.NewLeaseClient(c.conn),
c.newRetryWrapper(isReadStopError),
}
return &retryLeaseClient{retry, c.newAuthRetryWrapper()}
} }
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) { func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
@@ -167,7 +180,7 @@ type retryClusterClient struct {
// RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy. // RetryClusterClient implements a ClusterClient that uses the client's FailFast retry policy.
func RetryClusterClient(c *Client) pb.ClusterClient { func RetryClusterClient(c *Client) pb.ClusterClient {
return &retryClusterClient{pb.NewClusterClient(c.conn), c.retryWrapper} return &retryClusterClient{pb.NewClusterClient(c.conn), c.newRetryWrapper(isWriteStopError)}
} }
func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) { func (rcc *retryClusterClient) MemberAdd(ctx context.Context, in *pb.MemberAddRequest, opts ...grpc.CallOption) (resp *pb.MemberAddResponse, err error) {
@@ -201,7 +214,7 @@ type retryAuthClient struct {
// RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy. // RetryAuthClient implements a AuthClient that uses the client's FailFast retry policy.
func RetryAuthClient(c *Client) pb.AuthClient { func RetryAuthClient(c *Client) pb.AuthClient {
return &retryAuthClient{pb.NewAuthClient(c.conn), c.retryWrapper} return &retryAuthClient{pb.NewAuthClient(c.conn), c.newRetryWrapper(isWriteStopError)}
} }
func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) { func (rac *retryAuthClient) AuthEnable(ctx context.Context, in *pb.AuthEnableRequest, opts ...grpc.CallOption) (resp *pb.AuthEnableResponse, err error) {

View File

@@ -25,6 +25,7 @@ import (
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
) )
const ( const (
@@ -213,16 +214,15 @@ func (w *watcher) newWatcherGrpcStream(inctx context.Context) *watchGrpcStream {
owner: w, owner: w,
remote: w.remote, remote: w.remote,
ctx: ctx, ctx: ctx,
ctxKey: fmt.Sprintf("%v", inctx), ctxKey: streamKeyFromCtx(inctx),
cancel: cancel, cancel: cancel,
substreams: make(map[int64]*watcherStream), substreams: make(map[int64]*watcherStream),
respc: make(chan *pb.WatchResponse),
respc: make(chan *pb.WatchResponse), reqc: make(chan *watchRequest),
reqc: make(chan *watchRequest), donec: make(chan struct{}),
donec: make(chan struct{}), errc: make(chan error, 1),
errc: make(chan error, 1), closingc: make(chan *watcherStream),
closingc: make(chan *watcherStream), resumec: make(chan struct{}),
resumec: make(chan struct{}),
} }
go wgs.run() go wgs.run()
return wgs return wgs
@@ -253,7 +253,7 @@ func (w *watcher) Watch(ctx context.Context, key string, opts ...OpOption) Watch
} }
ok := false ok := false
ctxKey := fmt.Sprintf("%v", ctx) ctxKey := streamKeyFromCtx(ctx)
// find or allocate appropriate grpc watch stream // find or allocate appropriate grpc watch stream
w.mu.Lock() w.mu.Lock()
@@ -461,7 +461,7 @@ func (w *watchGrpcStream) run() {
if ws := w.nextResume(); ws != nil { if ws := w.nextResume(); ws != nil {
wc.Send(ws.initReq.toPB()) wc.Send(ws.initReq.toPB())
} }
case pbresp.Canceled: case pbresp.Canceled && pbresp.CompactRevision == 0:
delete(cancelSet, pbresp.WatchId) delete(cancelSet, pbresp.WatchId)
if ws, ok := w.substreams[pbresp.WatchId]; ok { if ws, ok := w.substreams[pbresp.WatchId]; ok {
// signal to stream goroutine to update closingc // signal to stream goroutine to update closingc
@@ -794,3 +794,10 @@ func (wr *watchRequest) toPB() *pb.WatchRequest {
cr := &pb.WatchRequest_CreateRequest{CreateRequest: req} cr := &pb.WatchRequest_CreateRequest{CreateRequest: req}
return &pb.WatchRequest{RequestUnion: cr} return &pb.WatchRequest{RequestUnion: cr}
} }
func streamKeyFromCtx(ctx context.Context) string {
if md, ok := metadata.FromOutgoingContext(ctx); ok {
return fmt.Sprintf("%+v", md)
}
return ""
}

Some files were not shown because too many files have changed in this diff Show More