Update deps, and adjust usage of go-uuid to match new return values

Jeff Mitchell
2016-01-13 13:40:08 -05:00
parent 239164733c
commit 21f91f73bb
146 changed files with 4718 additions and 5407 deletions
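The go-uuid adjustment in the title points at a signature change in the vendored github.com/hashicorp/go-uuid: at the new revision, GenerateUUID returns (string, error), so call sites have to handle the error. A minimal sketch of the resulting call-site pattern, assuming the earlier revision returned only the string:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/go-uuid"
)

func main() {
	// Old call sites read roughly: id := uuid.GenerateUUID()
	// With the updated dependency the error must be handled explicitly.
	id, err := uuid.GenerateUUID()
	if err != nil {
		log.Fatalf("failed to generate UUID: %v", err)
	}
	fmt.Println(id)
}
```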

Godeps/Godeps.json

@@ -7,7 +7,7 @@
"Deps": [
{
"ImportPath": "github.com/armon/go-metrics",
"Rev": "6c5fa0d8f48f4661c9ba8709799c88d425ad20f0"
"Rev": "345426c77237ece5dab0e1605c3e4b35c3f54757"
},
{
"ImportPath": "github.com/armon/go-radix",
@@ -15,103 +15,107 @@
},
{
"ImportPath": "github.com/asaskevich/govalidator",
"Comment": "v2-37-gedd46cd",
"Rev": "edd46cdac249b001c7b7d88c6d43993ea875e8d8"
"Comment": "v2-45-g1295808",
"Rev": "129580890d97de923d9d9aec06cb1aa771812aff"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/aws",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/endpoints",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/ec2query",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil",
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/query",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/rest",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/restxml",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/signer/v4",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/private/waiter",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/dynamodb",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/ec2",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/iam",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/aws/aws-sdk-go/service/s3",
"Comment": "v1.0.4",
"Rev": "999b1591218c36d5050d1ba7266eba956e65965f"
"Comment": "v1.0.8-3-g64ecfaa",
"Rev": "64ecfaae861c2f24bb2e8cf47c257a40e2b4f0d4"
},
{
"ImportPath": "github.com/bgentry/speakeasy",
"Rev": "36e9cfdd690967f4f690c6edcc9ffacd006014a0"
},
{
"ImportPath": "github.com/coreos/etcd/client",
"Comment": "v2.3.0-alpha.0-117-g858857d",
"Rev": "858857d7013af3142310aa2cef9a29d6a2edb736"
"Comment": "v2.3.0-alpha.0-451-gf2a4993",
"Rev": "f2a4993c11de6976a9aafdf2be67ad176e08b585"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/pathutil",
"Comment": "v2.3.0-alpha.0-117-g858857d",
"Rev": "858857d7013af3142310aa2cef9a29d6a2edb736"
"Comment": "v2.3.0-alpha.0-451-gf2a4993",
"Rev": "f2a4993c11de6976a9aafdf2be67ad176e08b585"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/transport",
"Comment": "v2.3.0-alpha.0-117-g858857d",
"Rev": "858857d7013af3142310aa2cef9a29d6a2edb736"
"Comment": "v2.3.0-alpha.0-451-gf2a4993",
"Rev": "f2a4993c11de6976a9aafdf2be67ad176e08b585"
},
{
"ImportPath": "github.com/coreos/etcd/pkg/types",
"Comment": "v2.3.0-alpha.0-117-g858857d",
"Rev": "858857d7013af3142310aa2cef9a29d6a2edb736"
"Comment": "v2.3.0-alpha.0-451-gf2a4993",
"Rev": "f2a4993c11de6976a9aafdf2be67ad176e08b585"
},
{
"ImportPath": "github.com/duosecurity/duo_api_golang",
@@ -119,7 +123,7 @@
},
{
"ImportPath": "github.com/fatih/structs",
"Rev": "9a7733345ff091c5457cb963f498a79ecd0bdbaa"
"Rev": "dd04ebad3deb3e3833c98ce4a83e2bff65734236"
},
{
"ImportPath": "github.com/ghodss/yaml",
@@ -127,23 +131,23 @@
},
{
"ImportPath": "github.com/go-ini/ini",
"Comment": "v0-56-g03e0e7d",
"Rev": "03e0e7d51a13a91c765d8d0161246bc14a38001a"
"Comment": "v1.8.6",
"Rev": "afbd495e5aaea13597b5e14fe514ddeaa4d76fc3"
},
{
"ImportPath": "github.com/go-ldap/ldap",
"Comment": "v2.2",
"Rev": "e9a325d64989e2844be629682cb085d2c58eef8d"
"Comment": "v2.2-2-gf24903d",
"Rev": "f24903dc69b694cd18db4724d714ac47e4fad4f2"
},
{
"ImportPath": "github.com/go-sql-driver/mysql",
"Comment": "v1.2-125-gd512f20",
"Rev": "d512f204a577a4ab037a1816604c48c9c13210be"
"Comment": "v1.2-139-g6fd058c",
"Rev": "6fd058ce0d6b7ee43174e80d5a3e7f483c4dfbe5"
},
{
"ImportPath": "github.com/gocql/gocql",
"Comment": "1st_gen_framing-383-g87cc185",
"Rev": "87cc1854b57c7a4d8f4ae1d0cc358ed6ecb0f8c3"
"Comment": "pre-node-events-83-gfd8f3f0",
"Rev": "fd8f3f0e793565489da5a4f2fd043425059c3f6a"
},
{
"ImportPath": "github.com/golang/snappy",
@@ -151,7 +155,7 @@
},
{
"ImportPath": "github.com/google/go-github/github",
"Rev": "47f2593dad1971eec2f0a0971aa007fef5edbc50"
"Rev": "63fbbb283ce4913a5ac1b6de7abae50dbf594a04"
},
{
"ImportPath": "github.com/google/go-querystring/query",
@@ -159,12 +163,12 @@
},
{
"ImportPath": "github.com/hailocab/go-hostpool",
"Rev": "0637eae892be221164aff5fcbccc57171aea6406"
"Rev": "50839ee41f32bfca8d03a183031aa634b2dc1c64"
},
{
"ImportPath": "github.com/hashicorp/consul/api",
"Comment": "v0.6.0-rc2-21-g86f20a3",
"Rev": "86f20a32e5afe1fbcd98129239cdea4bf8b0d5ba"
"Comment": "v0.6.1-18-g71e3901",
"Rev": "71e3901a6592817f9ebfd7f24f4ecff8ef16e7da"
},
{
"ImportPath": "github.com/hashicorp/errwrap",
@@ -184,11 +188,11 @@
},
{
"ImportPath": "github.com/hashicorp/go-uuid",
"Rev": "2d537aea09f39d27f9aa33092154c51ebe277486"
"Rev": "132dbc4ad1e0f75b30c3785f667fedbc55b3b198"
},
{
"ImportPath": "github.com/hashicorp/golang-lru",
"Rev": "b361c4c189a958f7d0ad435952611c140751afe2"
"Rev": "5c7531c003d8bf158b0fe5063649a2f41a822146"
},
{
"ImportPath": "github.com/hashicorp/hcl",
@@ -200,26 +204,26 @@
},
{
"ImportPath": "github.com/hashicorp/serf/coordinate",
"Comment": "v0.6.4-145-ga72c045",
"Rev": "a72c0453da2ba628a013e98bf323a76be4aa1443"
"Comment": "v0.7.0-1-g39c7c06",
"Rev": "39c7c06298b480560202bec00c2c77e974e88792"
},
{
"ImportPath": "github.com/jmespath/go-jmespath",
"Comment": "0.2.2",
"Rev": "3433f3ea46d9f8019119e7dd41274e112a2359a9"
},
{
"ImportPath": "github.com/kardianos/osext",
"Rev": "10da29423eb9a6269092eebdc2be32209612d9d2"
"Comment": "0.2.2-2-gc01cf91",
"Rev": "c01cf91b011868172fdcd9f41838e80c9d716264"
},
{
"ImportPath": "github.com/lib/pq",
"Comment": "go1.0-cutoff-63-g11fc39a",
"Rev": "11fc39a580a008f1f39bb3d11d984fb34ed778d9"
"Comment": "go1.0-cutoff-69-g2d785ad",
"Rev": "2d785ad24be7a038e2fc35424b993645a0025425"
},
{
"ImportPath": "github.com/mattn/go-isatty",
"Rev": "56b76bdf51f7708750eac80fa38b952bb9f32639"
},
{
"ImportPath": "github.com/mitchellh/cli",
"Rev": "8102d0ed5ea2709ade1243798785888175f6e415"
"Rev": "cb6853d606ea4a12a15ac83cc43503df99fd28fb"
},
{
"ImportPath": "github.com/mitchellh/copystructure",
@@ -252,35 +256,35 @@
},
{
"ImportPath": "golang.org/x/crypto/bcrypt",
"Rev": "7b85b097bf7527677d54d3220065e966a0e3b613"
"Rev": "d67eb63455fa4d6fca5802332d86f1f204017e00"
},
{
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "7b85b097bf7527677d54d3220065e966a0e3b613"
"Rev": "d67eb63455fa4d6fca5802332d86f1f204017e00"
},
{
"ImportPath": "golang.org/x/crypto/cast5",
"Rev": "7b85b097bf7527677d54d3220065e966a0e3b613"
"Rev": "d67eb63455fa4d6fca5802332d86f1f204017e00"
},
{
"ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "7b85b097bf7527677d54d3220065e966a0e3b613"
"Rev": "d67eb63455fa4d6fca5802332d86f1f204017e00"
},
{
"ImportPath": "golang.org/x/crypto/openpgp",
"Rev": "7b85b097bf7527677d54d3220065e966a0e3b613"
"Rev": "d67eb63455fa4d6fca5802332d86f1f204017e00"
},
{
"ImportPath": "golang.org/x/crypto/ssh",
"Rev": "7b85b097bf7527677d54d3220065e966a0e3b613"
"Rev": "d67eb63455fa4d6fca5802332d86f1f204017e00"
},
{
"ImportPath": "golang.org/x/net/context",
"Rev": "195180cfebf7362bd243a52477697895128c8777"
"Rev": "c93a9b4f2af537028078fd467936d5bd6320e126"
},
{
"ImportPath": "golang.org/x/oauth2",
"Rev": "442624c9ec9243441e83b374a9e22ac549b5c51d"
"Rev": "2baa8a1b9338cf13d9eeb27696d761155fa480be"
},
{
"ImportPath": "gopkg.in/asn1-ber.v1",
@@ -293,7 +297,7 @@
},
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "53feefa2559fb8dfa8d81baad31be332c97d6c77"
"Rev": "f7716cbe52baa25d2e9b0d0da546fcf909fc16b4"
}
]
}


@@ -80,19 +80,11 @@ func (s *DogStatsdSink) parseKey(key []string) ([]string, []string) {
// Implementation of methods in the MetricSink interface
func (s *DogStatsdSink) SetGauge(key []string, val float32) {
key, tags := s.parseKey(key)
flatKey := s.flattenKey(key)
rate := 1.0
s.client.Gauge(flatKey, float64(val), tags, rate)
s.SetGaugeWithTags(key, val, []string{})
}
func (s *DogStatsdSink) IncrCounter(key []string, val float32) {
key, tags := s.parseKey(key)
flatKey := s.flattenKey(key)
rate := 1.0
s.client.Count(flatKey, int64(val), tags, rate)
s.IncrCounterWithTags(key, val, []string{})
}
// EmitKey is not implemented since DogStatsd does not provide a metric type that holds an
@@ -101,9 +93,33 @@ func (s *DogStatsdSink) EmitKey(key []string, val float32) {
}
func (s *DogStatsdSink) AddSample(key []string, val float32) {
key, tags := s.parseKey(key)
flatKey := s.flattenKey(key)
s.AddSampleWithTags(key, val, []string{})
}
// The following ...WithTags methods correspond to Datadog's Tag extension to Statsd.
// http://docs.datadoghq.com/guides/dogstatsd/#tags
func (s *DogStatsdSink) SetGaugeWithTags(key []string, val float32, tags []string) {
flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags)
rate := 1.0
s.client.Gauge(flatKey, float64(val), tags, rate)
}
func (s *DogStatsdSink) IncrCounterWithTags(key []string, val float32, tags []string) {
flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags)
rate := 1.0
s.client.Count(flatKey, int64(val), tags, rate)
}
func (s *DogStatsdSink) AddSampleWithTags(key []string, val float32, tags []string) {
flatKey, tags := s.getFlatkeyAndCombinedTags(key, tags)
rate := 1.0
s.client.TimeInMilliseconds(flatKey, float64(val), tags, rate)
}
func (s *DogStatsdSink) getFlatkeyAndCombinedTags(key []string, tags []string) (flattenedKey string, combinedTags []string) {
key, hostTags := s.parseKey(key)
flatKey := s.flattenKey(key)
tags = append(tags, hostTags...)
return flatKey, tags
}
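The refactor above routes the plain MetricSink methods through the new *WithTags variants with an empty tag list. A short usage sketch, assuming the sink is still constructed with datadog.NewDogStatsdSink(addr, hostName) as in prior go-metrics revisions, and that tags follow Datadog's key:value convention:

```go
package main

import (
	"log"

	"github.com/armon/go-metrics/datadog"
)

func main() {
	// Assumed constructor: points the sink at a local dogstatsd agent.
	sink, err := datadog.NewDogStatsdSink("127.0.0.1:8125", "my-host")
	if err != nil {
		log.Fatalf("failed to create dogstatsd sink: %v", err)
	}

	// SetGauge now delegates to SetGaugeWithTags with an empty tag list,
	// so these two calls emit the same gauge; the second adds a Datadog tag.
	sink.SetGauge([]string{"vault", "expire", "num_leases"}, 42)
	sink.SetGaugeWithTags([]string{"vault", "expire", "num_leases"}, 42, []string{"env:dev"})
}
```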


@@ -32,6 +32,7 @@ func Abs(value float64) float64
func BlackList(str, chars string) string
func ByteLength(str string, params ...string) bool
func StringLength(str string, params ...string) bool
func StringMatches(s string, params ...string) bool
func CamelCaseToUnderscore(str string) string
func Contains(str, substring string) bool
func Count(array []interface{}, iterator ConditionIterator) int
@@ -85,6 +86,7 @@ func IsRGBcolor(str string) bool
func IsRequestURI(rawurl string) bool
func IsRequestURL(rawurl string) bool
func IsSSN(str string) bool
func IsSemver(str string) bool
func IsURL(str string) bool
func IsUTFDigit(str string) bool
func IsUTFLetter(str string) bool
@@ -210,7 +212,8 @@ Here is a list of available validators for struct fields (validator - used funct
"requri": IsRequestURI,
"requrl": IsRequestURL,
"rgbcolor": IsRGBcolor,
"ssn": IsSSN
"ssn": IsSSN,
"semver": IsSemver,
"uppercase": IsUpperCase,
"url": IsURL,
"utfdigit": IsUTFDigit,
@@ -227,6 +230,7 @@ Validators with parameters
```go
"length(min|max)": ByteLength,
"matches(pattern)": StringMatches,
```
And here is small example of usage:
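The README's usage example itself is not visible in this diff; a minimal illustrative sketch of validating the parameterized tags with ValidateStruct (the struct, field names, and patterns below are hypothetical, not taken from the README):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

// Hypothetical struct for illustration only.
type Release struct {
	Name    string `valid:"length(3|32)"`
	Version string `valid:"semver"`
	Commit  string `valid:"matches(^[0-9a-f]+$)"`
}

func main() {
	ok, err := govalidator.ValidateStruct(Release{
		Name:    "vault",
		Version: "0.4.1",
		Commit:  "21f91f73bb",
	})
	fmt.Println(ok, err) // true <nil> when every field satisfies its tag
}
```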


@@ -33,6 +33,7 @@ const (
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
UnixPath string = `^((?:\/[a-zA-Z0-9\.\:]+(?:_[a-zA-Z0-9\:\.]+)*(?:\-[\:a-zA-Z0-9\.]+)*)+\/?)$`
Semver string = "^v?(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)\\.(?:0|[1-9]\\d*)(-(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*)(\\.(0|[1-9]\\d*|\\d*[a-zA-Z-][0-9a-zA-Z-]*))*)?(\\+[0-9a-zA-Z-]+(\\.[0-9a-zA-Z-]+)*)?$"
tagName string = "valid"
)
@@ -76,4 +77,5 @@ var (
rxSSN = regexp.MustCompile(SSN)
rxWinPath = regexp.MustCompile(WinPath)
rxUnixPath = regexp.MustCompile(UnixPath)
rxSemver = regexp.MustCompile(Semver)
)


@@ -28,11 +28,13 @@ type stringValues []reflect.Value
var ParamTagMap = map[string]ParamValidator{
"length": ByteLength,
"stringlength": StringLength,
"matches": StringMatches,
}
var ParamTagRegexMap = map[string]*regexp.Regexp{
"length": regexp.MustCompile("^length\\((\\d+)\\|(\\d+)\\)$"),
"stringlength": regexp.MustCompile("^stringlength\\((\\d+)\\|(\\d+)\\)$"),
"matches": regexp.MustCompile(`matches\(([^)]+)\)`),
}
// CustomTypeTagMap is a map of functions that can be used as tags for ValidateStruct function.
@@ -84,6 +86,7 @@ var TagMap = map[string]Validator{
"latitude": IsLatitude,
"longitude": IsLongitude,
"ssn": IsSSN,
"semver": IsSemver,
}
// ISO3166Entry stores country codes


@@ -458,13 +458,13 @@ func IsIP(str string) bool {
// IsIPv4 check if the string is an IP version 4.
func IsIPv4(str string) bool {
ip := net.ParseIP(str)
return ip != nil && ip.To4() != nil
return ip != nil && strings.Contains(str, ".")
}
// IsIPv6 check if the string is an IP version 6.
func IsIPv6(str string) bool {
ip := net.ParseIP(str)
return ip != nil && ip.To4() == nil
return ip != nil && strings.Contains(str, ":")
}
// IsMAC check if a string is valid MAC address.
@@ -563,6 +563,11 @@ func IsSSN(str string) bool {
return rxSSN.MatchString(str)
}
// IsSemver check if string is valid semantic version
func IsSemver(str string) bool {
return rxSemver.MatchString(str)
}
// ByteLength check string's length
func ByteLength(str string, params ...string) bool {
if len(params) == 2 {
@@ -574,6 +579,15 @@ func ByteLength(str string, params ...string) bool {
return false
}
// StringMatches checks if a string matches a given pattern.
func StringMatches(s string, params ...string) bool {
if len(params) == 1 {
pattern := params[0]
return Matches(s, pattern)
}
return false
}
// StringLength check string's length (including multi byte strings)
func StringLength(str string, params ...string) bool {
@@ -667,7 +681,7 @@ func typeCheck(v reflect.Value, t reflect.StructField) (bool, error) {
negate = true
}
if ok := isValidTag(tagOpt); !ok {
err := fmt.Errorf("Unkown Validator %s", tagOpt)
err := fmt.Errorf("Unknown Validator %s", tagOpt)
return false, Error{t.Name, err}
}
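A quick sketch exercising the validators shown above: the new IsSemver and StringMatches helpers, plus the revised IsIPv4/IsIPv6 checks, which now also require the textual form to contain "." or ":" respectively (the expected results follow from the code in this hunk):

```go
package main

import (
	"fmt"

	"github.com/asaskevich/govalidator"
)

func main() {
	fmt.Println(govalidator.IsSemver("v1.2.3-rc.1+build.5"))           // true
	fmt.Println(govalidator.StringMatches("abc123", "^[a-z]+[0-9]+$")) // true

	// An IPv6 literal parses as an IP but contains no ".", so IsIPv4 is false.
	fmt.Println(govalidator.IsIPv4("2001:db8::1")) // false
	fmt.Println(govalidator.IsIPv6("2001:db8::1")) // true
}
```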


@@ -108,7 +108,7 @@ const logRespMsg = `DEBUG: Response %s/%s Details:
-----------------------------------------------------`
func logResponse(r *request.Request) {
var msg = "no reponse data"
var msg = "no response data"
if r.HTTPResponse != nil {
logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody)
dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody)


@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.0.4"
const SDKVersion = "1.0.8"


@@ -78,6 +78,9 @@
"ap-northeast-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"ap-northeast-2/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},
"sa-east-1/s3": {
"endpoint": "s3-{region}.amazonaws.com"
},


@@ -57,6 +57,9 @@ var endpointsMap = endpointStruct{
"ap-northeast-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"ap-northeast-2/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},
"ap-southeast-1/s3": {
Endpoint: "s3-{region}.amazonaws.com",
},


@@ -1,100 +0,0 @@
package jsonutil_test
import (
"encoding/json"
"testing"
"time"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/stretchr/testify/assert"
)
func S(s string) *string {
return &s
}
func D(s int64) *int64 {
return &s
}
func F(s float64) *float64 {
return &s
}
func T(s time.Time) *time.Time {
return &s
}
type J struct {
S *string
SS []string
D *int64
F *float64
T *time.Time
}
var jsonTests = []struct {
in interface{}
out string
err string
}{
{
J{},
`{}`,
``,
},
{
J{
S: S("str"),
SS: []string{"A", "B", "C"},
D: D(123),
F: F(4.56),
T: T(time.Unix(987, 0)),
},
`{"S":"str","SS":["A","B","C"],"D":123,"F":4.56,"T":987}`,
``,
},
{
J{
S: S(`"''"`),
},
`{"S":"\"''\""}`,
``,
},
{
J{
S: S("\x00føø\u00FF\n\\\"\r\t\b\f"),
},
`{"S":"\u0000føøÿ\n\\\"\r\t\b\f"}`,
``,
},
}
func TestBuildJSON(t *testing.T) {
for _, test := range jsonTests {
out, err := jsonutil.BuildJSON(test.in)
if test.err != "" {
assert.Error(t, err)
assert.Contains(t, err.Error(), test.err)
} else {
assert.NoError(t, err)
assert.Equal(t, string(out), test.out)
}
}
}
func BenchmarkBuildJSON(b *testing.B) {
for i := 0; i < b.N; i++ {
for _, test := range jsonTests {
jsonutil.BuildJSON(test.in)
}
}
}
func BenchmarkStdlibJSON(b *testing.B) {
for i := 0; i < b.N; i++ {
for _, test := range jsonTests {
json.Marshal(test.in)
}
}
}


@@ -1,71 +0,0 @@
// +build bench
package jsonrpc_test
import (
"bytes"
"encoding/json"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting"
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
"github.com/aws/aws-sdk-go/service/dynamodb"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)
func BenchmarkJSONRPCBuild_Simple_dynamodbPutItem(b *testing.B) {
svc := awstesting.NewClient()
params := getDynamodbPutItemParams(b)
for i := 0; i < b.N; i++ {
r := svc.NewRequest(&request.Operation{Name: "Operation"}, params, nil)
jsonrpc.Build(r)
if r.Error != nil {
b.Fatal("Unexpected error", r.Error)
}
}
}
func BenchmarkJSONUtilBuild_Simple_dynamodbPutItem(b *testing.B) {
svc := awstesting.NewClient()
params := getDynamodbPutItemParams(b)
for i := 0; i < b.N; i++ {
r := svc.NewRequest(&request.Operation{Name: "Operation"}, params, nil)
_, err := jsonutil.BuildJSON(r.Params)
if err != nil {
b.Fatal("Unexpected error", err)
}
}
}
func BenchmarkEncodingJSONMarshal_Simple_dynamodbPutItem(b *testing.B) {
params := getDynamodbPutItemParams(b)
for i := 0; i < b.N; i++ {
buf := &bytes.Buffer{}
encoder := json.NewEncoder(buf)
if err := encoder.Encode(params); err != nil {
b.Fatal("Unexpected error", err)
}
}
}
func getDynamodbPutItemParams(b *testing.B) *dynamodb.PutItemInput {
av, err := dynamodbattribute.ConvertToMap(struct {
Key string
Data string
}{Key: "MyKey", Data: "MyData"})
if err != nil {
b.Fatal("benchPutItem, expect no ConvertToMap errors", err)
}
return &dynamodb.PutItemInput{
Item: av,
TableName: aws.String("tablename"),
}
}

File diff suppressed because it is too large.


@@ -1,812 +0,0 @@
package jsonrpc_test
import (
"bytes"
"encoding/json"
"encoding/xml"
"io"
"io/ioutil"
"net/http"
"net/url"
"testing"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/client"
"github.com/aws/aws-sdk-go/aws/client/metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/awstesting"
"github.com/aws/aws-sdk-go/private/protocol/jsonrpc"
"github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil"
"github.com/aws/aws-sdk-go/private/signer/v4"
"github.com/aws/aws-sdk-go/private/util"
"github.com/stretchr/testify/assert"
)
var _ bytes.Buffer // always import bytes
var _ http.Request
var _ json.Marshaler
var _ time.Time
var _ xmlutil.XMLNode
var _ xml.Attr
var _ = awstesting.GenerateAssertions
var _ = ioutil.Discard
var _ = util.Trim("")
var _ = url.Values{}
var _ = io.EOF
var _ = aws.String
//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type OutputService1ProtocolTest struct {
*client.Client
}
// New creates a new instance of the OutputService1ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a OutputService1ProtocolTest client from just a session.
// svc := outputservice1protocoltest.New(mySession)
//
// // Create a OutputService1ProtocolTest client with additional configuration
// svc := outputservice1protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewOutputService1ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService1ProtocolTest {
c := p.ClientConfig("outputservice1protocoltest", cfgs...)
return newOutputService1ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newOutputService1ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService1ProtocolTest {
svc := &OutputService1ProtocolTest{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: "outputservice1protocoltest",
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "",
JSONVersion: "",
TargetPrefix: "",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBack(jsonrpc.Build)
svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
return svc
}
// newRequest creates a new request for a OutputService1ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService1ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
return req
}
const opOutputService1TestCaseOperation1 = "OperationName"
// OutputService1TestCaseOperation1Request generates a request for the OutputService1TestCaseOperation1 operation.
func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1Request(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (req *request.Request, output *OutputService1TestShapeOutputService1TestCaseOperation1Output) {
op := &request.Operation{
Name: opOutputService1TestCaseOperation1,
}
if input == nil {
input = &OutputService1TestShapeOutputService1TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService1TestShapeOutputService1TestCaseOperation1Output{}
req.Data = output
return
}
func (c *OutputService1ProtocolTest) OutputService1TestCaseOperation1(input *OutputService1TestShapeOutputService1TestCaseOperation1Input) (*OutputService1TestShapeOutputService1TestCaseOperation1Output, error) {
req, out := c.OutputService1TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService1TestShapeOutputService1TestCaseOperation1Input struct {
_ struct{} `type:"structure"`
}
type OutputService1TestShapeOutputService1TestCaseOperation1Output struct {
_ struct{} `type:"structure"`
Char *string `type:"character"`
Double *float64 `type:"double"`
FalseBool *bool `type:"boolean"`
Float *float64 `type:"float"`
Long *int64 `type:"long"`
Num *int64 `type:"integer"`
Str *string `type:"string"`
TrueBool *bool `type:"boolean"`
}
//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type OutputService2ProtocolTest struct {
*client.Client
}
// New creates a new instance of the OutputService2ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a OutputService2ProtocolTest client from just a session.
// svc := outputservice2protocoltest.New(mySession)
//
// // Create a OutputService2ProtocolTest client with additional configuration
// svc := outputservice2protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewOutputService2ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService2ProtocolTest {
c := p.ClientConfig("outputservice2protocoltest", cfgs...)
return newOutputService2ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newOutputService2ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService2ProtocolTest {
svc := &OutputService2ProtocolTest{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: "outputservice2protocoltest",
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "",
JSONVersion: "",
TargetPrefix: "",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBack(jsonrpc.Build)
svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
return svc
}
// newRequest creates a new request for a OutputService2ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService2ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
return req
}
const opOutputService2TestCaseOperation1 = "OperationName"
// OutputService2TestCaseOperation1Request generates a request for the OutputService2TestCaseOperation1 operation.
func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1Request(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (req *request.Request, output *OutputService2TestShapeOutputService2TestCaseOperation1Output) {
op := &request.Operation{
Name: opOutputService2TestCaseOperation1,
}
if input == nil {
input = &OutputService2TestShapeOutputService2TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService2TestShapeOutputService2TestCaseOperation1Output{}
req.Data = output
return
}
func (c *OutputService2ProtocolTest) OutputService2TestCaseOperation1(input *OutputService2TestShapeOutputService2TestCaseOperation1Input) (*OutputService2TestShapeOutputService2TestCaseOperation1Output, error) {
req, out := c.OutputService2TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService2TestShapeBlobContainer struct {
_ struct{} `type:"structure"`
Foo []byte `locationName:"foo" type:"blob"`
}
type OutputService2TestShapeOutputService2TestCaseOperation1Input struct {
_ struct{} `type:"structure"`
}
type OutputService2TestShapeOutputService2TestCaseOperation1Output struct {
_ struct{} `type:"structure"`
BlobMember []byte `type:"blob"`
StructMember *OutputService2TestShapeBlobContainer `type:"structure"`
}
//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type OutputService3ProtocolTest struct {
*client.Client
}
// New creates a new instance of the OutputService3ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a OutputService3ProtocolTest client from just a session.
// svc := outputservice3protocoltest.New(mySession)
//
// // Create a OutputService3ProtocolTest client with additional configuration
// svc := outputservice3protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewOutputService3ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService3ProtocolTest {
c := p.ClientConfig("outputservice3protocoltest", cfgs...)
return newOutputService3ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newOutputService3ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService3ProtocolTest {
svc := &OutputService3ProtocolTest{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: "outputservice3protocoltest",
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "",
JSONVersion: "",
TargetPrefix: "",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBack(jsonrpc.Build)
svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
return svc
}
// newRequest creates a new request for a OutputService3ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService3ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
return req
}
const opOutputService3TestCaseOperation1 = "OperationName"
// OutputService3TestCaseOperation1Request generates a request for the OutputService3TestCaseOperation1 operation.
func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1Request(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (req *request.Request, output *OutputService3TestShapeOutputService3TestCaseOperation1Output) {
op := &request.Operation{
Name: opOutputService3TestCaseOperation1,
}
if input == nil {
input = &OutputService3TestShapeOutputService3TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService3TestShapeOutputService3TestCaseOperation1Output{}
req.Data = output
return
}
func (c *OutputService3ProtocolTest) OutputService3TestCaseOperation1(input *OutputService3TestShapeOutputService3TestCaseOperation1Input) (*OutputService3TestShapeOutputService3TestCaseOperation1Output, error) {
req, out := c.OutputService3TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService3TestShapeOutputService3TestCaseOperation1Input struct {
_ struct{} `type:"structure"`
}
type OutputService3TestShapeOutputService3TestCaseOperation1Output struct {
_ struct{} `type:"structure"`
StructMember *OutputService3TestShapeTimeContainer `type:"structure"`
TimeMember *time.Time `type:"timestamp" timestampFormat:"unix"`
}
type OutputService3TestShapeTimeContainer struct {
_ struct{} `type:"structure"`
Foo *time.Time `locationName:"foo" type:"timestamp" timestampFormat:"unix"`
}
//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type OutputService4ProtocolTest struct {
*client.Client
}
// New creates a new instance of the OutputService4ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a OutputService4ProtocolTest client from just a session.
// svc := outputservice4protocoltest.New(mySession)
//
// // Create a OutputService4ProtocolTest client with additional configuration
// svc := outputservice4protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewOutputService4ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService4ProtocolTest {
c := p.ClientConfig("outputservice4protocoltest", cfgs...)
return newOutputService4ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newOutputService4ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService4ProtocolTest {
svc := &OutputService4ProtocolTest{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: "outputservice4protocoltest",
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "",
JSONVersion: "",
TargetPrefix: "",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBack(jsonrpc.Build)
svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
return svc
}
// newRequest creates a new request for a OutputService4ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService4ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
return req
}
const opOutputService4TestCaseOperation1 = "OperationName"
// OutputService4TestCaseOperation1Request generates a request for the OutputService4TestCaseOperation1 operation.
func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1Request(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (req *request.Request, output *OutputService4TestShapeOutputShape) {
op := &request.Operation{
Name: opOutputService4TestCaseOperation1,
}
if input == nil {
input = &OutputService4TestShapeOutputService4TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService4TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation1(input *OutputService4TestShapeOutputService4TestCaseOperation1Input) (*OutputService4TestShapeOutputShape, error) {
req, out := c.OutputService4TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
const opOutputService4TestCaseOperation2 = "OperationName"
// OutputService4TestCaseOperation2Request generates a request for the OutputService4TestCaseOperation2 operation.
func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation2Request(input *OutputService4TestShapeOutputService4TestCaseOperation2Input) (req *request.Request, output *OutputService4TestShapeOutputShape) {
op := &request.Operation{
Name: opOutputService4TestCaseOperation2,
}
if input == nil {
input = &OutputService4TestShapeOutputService4TestCaseOperation2Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService4TestShapeOutputShape{}
req.Data = output
return
}
func (c *OutputService4ProtocolTest) OutputService4TestCaseOperation2(input *OutputService4TestShapeOutputService4TestCaseOperation2Input) (*OutputService4TestShapeOutputShape, error) {
req, out := c.OutputService4TestCaseOperation2Request(input)
err := req.Send()
return out, err
}
type OutputService4TestShapeOutputService4TestCaseOperation1Input struct {
_ struct{} `type:"structure"`
}
type OutputService4TestShapeOutputService4TestCaseOperation2Input struct {
_ struct{} `type:"structure"`
}
type OutputService4TestShapeOutputShape struct {
_ struct{} `type:"structure"`
ListMember []*string `type:"list"`
ListMemberMap []map[string]*string `type:"list"`
ListMemberStruct []*OutputService4TestShapeStructType `type:"list"`
}
type OutputService4TestShapeStructType struct {
_ struct{} `type:"structure"`
}
//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type OutputService5ProtocolTest struct {
*client.Client
}
// New creates a new instance of the OutputService5ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a OutputService5ProtocolTest client from just a session.
// svc := outputservice5protocoltest.New(mySession)
//
// // Create a OutputService5ProtocolTest client with additional configuration
// svc := outputservice5protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewOutputService5ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService5ProtocolTest {
c := p.ClientConfig("outputservice5protocoltest", cfgs...)
return newOutputService5ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newOutputService5ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService5ProtocolTest {
svc := &OutputService5ProtocolTest{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: "outputservice5protocoltest",
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "",
JSONVersion: "",
TargetPrefix: "",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBack(jsonrpc.Build)
svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
return svc
}
// newRequest creates a new request for a OutputService5ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService5ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
return req
}
const opOutputService5TestCaseOperation1 = "OperationName"
// OutputService5TestCaseOperation1Request generates a request for the OutputService5TestCaseOperation1 operation.
func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1Request(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (req *request.Request, output *OutputService5TestShapeOutputService5TestCaseOperation1Output) {
op := &request.Operation{
Name: opOutputService5TestCaseOperation1,
}
if input == nil {
input = &OutputService5TestShapeOutputService5TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService5TestShapeOutputService5TestCaseOperation1Output{}
req.Data = output
return
}
func (c *OutputService5ProtocolTest) OutputService5TestCaseOperation1(input *OutputService5TestShapeOutputService5TestCaseOperation1Input) (*OutputService5TestShapeOutputService5TestCaseOperation1Output, error) {
req, out := c.OutputService5TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService5TestShapeOutputService5TestCaseOperation1Input struct {
_ struct{} `type:"structure"`
}
type OutputService5TestShapeOutputService5TestCaseOperation1Output struct {
_ struct{} `type:"structure"`
MapMember map[string][]*int64 `type:"map"`
}
//The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type OutputService6ProtocolTest struct {
*client.Client
}
// New creates a new instance of the OutputService6ProtocolTest client with a session.
// If additional configuration is needed for the client instance use the optional
// aws.Config parameter to add your extra config.
//
// Example:
// // Create a OutputService6ProtocolTest client from just a session.
// svc := outputservice6protocoltest.New(mySession)
//
// // Create a OutputService6ProtocolTest client with additional configuration
// svc := outputservice6protocoltest.New(mySession, aws.NewConfig().WithRegion("us-west-2"))
func NewOutputService6ProtocolTest(p client.ConfigProvider, cfgs ...*aws.Config) *OutputService6ProtocolTest {
c := p.ClientConfig("outputservice6protocoltest", cfgs...)
return newOutputService6ProtocolTestClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion)
}
// newClient creates, initializes and returns a new service client instance.
func newOutputService6ProtocolTestClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *OutputService6ProtocolTest {
svc := &OutputService6ProtocolTest{
Client: client.New(
cfg,
metadata.ClientInfo{
ServiceName: "outputservice6protocoltest",
SigningRegion: signingRegion,
Endpoint: endpoint,
APIVersion: "",
JSONVersion: "",
TargetPrefix: "",
},
handlers,
),
}
// Handlers
svc.Handlers.Sign.PushBack(v4.Sign)
svc.Handlers.Build.PushBack(jsonrpc.Build)
svc.Handlers.Unmarshal.PushBack(jsonrpc.Unmarshal)
svc.Handlers.UnmarshalMeta.PushBack(jsonrpc.UnmarshalMeta)
svc.Handlers.UnmarshalError.PushBack(jsonrpc.UnmarshalError)
return svc
}
// newRequest creates a new request for a OutputService6ProtocolTest operation and runs any
// custom request initialization.
func (c *OutputService6ProtocolTest) newRequest(op *request.Operation, params, data interface{}) *request.Request {
req := c.NewRequest(op, params, data)
return req
}
const opOutputService6TestCaseOperation1 = "OperationName"
// OutputService6TestCaseOperation1Request generates a request for the OutputService6TestCaseOperation1 operation.
func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1Request(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (req *request.Request, output *OutputService6TestShapeOutputService6TestCaseOperation1Output) {
op := &request.Operation{
Name: opOutputService6TestCaseOperation1,
}
if input == nil {
input = &OutputService6TestShapeOutputService6TestCaseOperation1Input{}
}
req = c.newRequest(op, input, output)
output = &OutputService6TestShapeOutputService6TestCaseOperation1Output{}
req.Data = output
return
}
func (c *OutputService6ProtocolTest) OutputService6TestCaseOperation1(input *OutputService6TestShapeOutputService6TestCaseOperation1Input) (*OutputService6TestShapeOutputService6TestCaseOperation1Output, error) {
req, out := c.OutputService6TestCaseOperation1Request(input)
err := req.Send()
return out, err
}
type OutputService6TestShapeOutputService6TestCaseOperation1Input struct {
_ struct{} `type:"structure"`
}
type OutputService6TestShapeOutputService6TestCaseOperation1Output struct {
_ struct{} `type:"structure"`
StrType *string `type:"string"`
}
//
// Tests begin here
//
func TestOutputService1ProtocolTestScalarMembersCase1(t *testing.T) {
sess := session.New()
svc := NewOutputService1ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
buf := bytes.NewReader([]byte("{\"Str\": \"myname\", \"Num\": 123, \"FalseBool\": false, \"TrueBool\": true, \"Float\": 1.2, \"Double\": 1.3, \"Long\": 200, \"Char\": \"a\"}"))
req, out := svc.OutputService1TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
jsonrpc.UnmarshalMeta(req)
jsonrpc.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "a", *out.Char)
assert.Equal(t, 1.3, *out.Double)
assert.Equal(t, false, *out.FalseBool)
assert.Equal(t, 1.2, *out.Float)
assert.Equal(t, int64(200), *out.Long)
assert.Equal(t, int64(123), *out.Num)
assert.Equal(t, "myname", *out.Str)
assert.Equal(t, true, *out.TrueBool)
}
func TestOutputService2ProtocolTestBlobMembersCase1(t *testing.T) {
sess := session.New()
svc := NewOutputService2ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
buf := bytes.NewReader([]byte("{\"BlobMember\": \"aGkh\", \"StructMember\": {\"foo\": \"dGhlcmUh\"}}"))
req, out := svc.OutputService2TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
jsonrpc.UnmarshalMeta(req)
jsonrpc.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "hi!", string(out.BlobMember))
assert.Equal(t, "there!", string(out.StructMember.Foo))
}
func TestOutputService3ProtocolTestTimestampMembersCase1(t *testing.T) {
sess := session.New()
svc := NewOutputService3ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
buf := bytes.NewReader([]byte("{\"TimeMember\": 1398796238, \"StructMember\": {\"foo\": 1398796238}}"))
req, out := svc.OutputService3TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
jsonrpc.UnmarshalMeta(req)
jsonrpc.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.StructMember.Foo.String())
assert.Equal(t, time.Unix(1.398796238e+09, 0).UTC().String(), out.TimeMember.String())
}
func TestOutputService4ProtocolTestListsCase1(t *testing.T) {
sess := session.New()
svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", \"b\"]}"))
req, out := svc.OutputService4TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
jsonrpc.UnmarshalMeta(req)
jsonrpc.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "a", *out.ListMember[0])
assert.Equal(t, "b", *out.ListMember[1])
}
func TestOutputService4ProtocolTestListsCase2(t *testing.T) {
sess := session.New()
svc := NewOutputService4ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
buf := bytes.NewReader([]byte("{\"ListMember\": [\"a\", null], \"ListMemberMap\": [{}, null, null, {}], \"ListMemberStruct\": [{}, null, null, {}]}"))
req, out := svc.OutputService4TestCaseOperation2Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
jsonrpc.UnmarshalMeta(req)
jsonrpc.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, "a", *out.ListMember[0])
assert.Nil(t, out.ListMember[1])
assert.Nil(t, out.ListMemberMap[1])
assert.Nil(t, out.ListMemberMap[2])
assert.Nil(t, out.ListMemberStruct[1])
assert.Nil(t, out.ListMemberStruct[2])
}
func TestOutputService5ProtocolTestMapsCase1(t *testing.T) {
sess := session.New()
svc := NewOutputService5ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
buf := bytes.NewReader([]byte("{\"MapMember\": {\"a\": [1, 2], \"b\": [3, 4]}}"))
req, out := svc.OutputService5TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
jsonrpc.UnmarshalMeta(req)
jsonrpc.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
assert.Equal(t, int64(1), *out.MapMember["a"][0])
assert.Equal(t, int64(2), *out.MapMember["a"][1])
assert.Equal(t, int64(3), *out.MapMember["b"][0])
assert.Equal(t, int64(4), *out.MapMember["b"][1])
}
func TestOutputService6ProtocolTestIgnoresExtraDataCase1(t *testing.T) {
sess := session.New()
svc := NewOutputService6ProtocolTest(sess, &aws.Config{Endpoint: aws.String("https://test")})
buf := bytes.NewReader([]byte("{\"foo\": \"bar\"}"))
req, out := svc.OutputService6TestCaseOperation1Request(nil)
req.HTTPResponse = &http.Response{StatusCode: 200, Body: ioutil.NopCloser(buf), Header: http.Header{}}
// set headers
// unmarshal response
jsonrpc.UnmarshalMeta(req)
jsonrpc.Unmarshal(req)
assert.NoError(t, req.Error)
// assert response
assert.NotNil(t, out) // ensure out variable is used
}


@@ -25,5 +25,5 @@ func Unmarshal(r *request.Request) {
// UnmarshalMeta unmarshals header response values for an AWS Query service.
func UnmarshalMeta(r *request.Request) {
// TODO implement unmarshaling of request IDs
r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
}


@@ -24,10 +24,14 @@ func UnmarshalError(r *request.Request) {
if err != nil && err != io.EOF {
r.Error = awserr.New("SerializationError", "failed to decode query XML error response", err)
} else {
reqID := resp.RequestID
if reqID == "" {
reqID = r.RequestID
}
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
resp.RequestID,
reqID,
)
}
}


@@ -26,6 +26,10 @@ func Unmarshal(r *request.Request) {
// UnmarshalMeta unmarshals the REST metadata of a response in a REST service
func UnmarshalMeta(r *request.Request) {
r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid")
if r.RequestID == "" {
// Alternative version of request id in the header
r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id")
}
if r.DataFilled() {
v := reflect.Indirect(reflect.ValueOf(r.Data))
unmarshalLocationElements(r, v)


@@ -51,17 +51,15 @@ func (w *Waiter) Wait() error {
err := req.Send()
for _, a := range w.Acceptors {
if err != nil && a.Matcher != "error" {
// Only matcher error is valid if there is a request error
continue
}
result := false
var vals []interface{}
switch a.Matcher {
case "pathAll", "path":
// Require all matches to be equal for result to match
vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument)
if len(vals) == 0 {
break
}
result = true
for _, val := range vals {
if !awsutil.DeepEqual(val, a.Expected) {


@@ -39,15 +39,19 @@ func init() {
}
}
func drainBody(b io.ReadCloser) (out *bytes.Buffer, err error) {
var buf bytes.Buffer
func drainBody(b io.ReadCloser, length int64) (out *bytes.Buffer, err error) {
if length < 0 {
length = 0
}
buf := bytes.NewBuffer(make([]byte, 0, length))
if _, err = buf.ReadFrom(b); err != nil {
return nil, err
}
if err = b.Close(); err != nil {
return nil, err
}
return &buf, nil
return buf, nil
}
func disableCompression(r *request.Request) {
@@ -75,7 +79,7 @@ func validateCRC32(r *request.Request) {
return // Could not determine CRC value, skip
}
buf, err := drainBody(r.HTTPResponse.Body)
buf, err := drainBody(r.HTTPResponse.Body, r.HTTPResponse.ContentLength)
if err != nil { // failed to read the response body, skip
return
}


@@ -1,105 +0,0 @@
package dynamodb_test
import (
"bytes"
"io/ioutil"
"net/http"
"os"
"testing"
"github.com/stretchr/testify/assert"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/awstesting/unit"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
var db *dynamodb.DynamoDB
func TestMain(m *testing.M) {
db = dynamodb.New(unit.Session, &aws.Config{
MaxRetries: aws.Int(2),
})
db.Handlers.Send.Clear() // mock sending
os.Exit(m.Run())
}
func mockCRCResponse(svc *dynamodb.DynamoDB, status int, body, crc string) (req *request.Request) {
header := http.Header{}
header.Set("x-amz-crc32", crc)
req, _ = svc.ListTablesRequest(nil)
req.Handlers.Send.PushBack(func(*request.Request) {
req.HTTPResponse = &http.Response{
StatusCode: status,
Body: ioutil.NopCloser(bytes.NewReader([]byte(body))),
Header: header,
}
})
req.Send()
return
}
func TestDefaultRetryRules(t *testing.T) {
d := dynamodb.New(unit.Session, &aws.Config{MaxRetries: aws.Int(-1)})
assert.Equal(t, d.MaxRetries(), 10)
}
func TestCustomRetryRules(t *testing.T) {
d := dynamodb.New(unit.Session, &aws.Config{MaxRetries: aws.Int(2)})
assert.Equal(t, d.MaxRetries(), 2)
}
func TestValidateCRC32NoHeaderSkip(t *testing.T) {
req := mockCRCResponse(db, 200, "{}", "")
assert.NoError(t, req.Error)
}
func TestValidateCRC32InvalidHeaderSkip(t *testing.T) {
req := mockCRCResponse(db, 200, "{}", "ABC")
assert.NoError(t, req.Error)
}
func TestValidateCRC32AlreadyErrorSkip(t *testing.T) {
req := mockCRCResponse(db, 400, "{}", "1234")
assert.Error(t, req.Error)
assert.NotEqual(t, "CRC32CheckFailed", req.Error.(awserr.Error).Code())
}
func TestValidateCRC32IsValid(t *testing.T) {
req := mockCRCResponse(db, 200, `{"TableNames":["A"]}`, "3090163698")
assert.NoError(t, req.Error)
// CRC check does not affect output parsing
out := req.Data.(*dynamodb.ListTablesOutput)
assert.Equal(t, "A", *out.TableNames[0])
}
func TestValidateCRC32DoesNotMatch(t *testing.T) {
req := mockCRCResponse(db, 200, "{}", "1234")
assert.Error(t, req.Error)
assert.Equal(t, "CRC32CheckFailed", req.Error.(awserr.Error).Code())
assert.Equal(t, 2, req.RetryCount)
}
func TestValidateCRC32DoesNotMatchNoComputeChecksum(t *testing.T) {
svc := dynamodb.New(unit.Session, &aws.Config{
MaxRetries: aws.Int(2),
DisableComputeChecksums: aws.Bool(true),
})
svc.Handlers.Send.Clear() // mock sending
req := mockCRCResponse(svc, 200, `{"TableNames":["A"]}`, "1234")
assert.NoError(t, req.Error)
assert.Equal(t, 0, int(req.RetryCount))
// CRC check disabled. Does not affect output parsing
out := req.Data.(*dynamodb.ListTablesOutput)
assert.Equal(t, "A", *out.TableNames[0])
}


@@ -1,488 +0,0 @@
package dynamodbattribute
import (
"math"
"reflect"
"testing"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/awsutil"
"github.com/aws/aws-sdk-go/service/dynamodb"
)
type mySimpleStruct struct {
String string
Int int
Uint uint
Float32 float32
Float64 float64
Bool bool
Null *interface{}
}
type myComplexStruct struct {
Simple []mySimpleStruct
}
type converterTestInput struct {
input interface{}
expected interface{}
err awserr.Error
inputType string // "enum" of types
}
var trueValue = true
var falseValue = false
var converterScalarInputs = []converterTestInput{
{
input: nil,
expected: &dynamodb.AttributeValue{NULL: &trueValue},
},
{
input: "some string",
expected: &dynamodb.AttributeValue{S: aws.String("some string")},
},
{
input: true,
expected: &dynamodb.AttributeValue{BOOL: &trueValue},
},
{
input: false,
expected: &dynamodb.AttributeValue{BOOL: &falseValue},
},
{
input: 3.14,
expected: &dynamodb.AttributeValue{N: aws.String("3.14")},
},
{
input: math.MaxFloat32,
expected: &dynamodb.AttributeValue{N: aws.String("340282346638528860000000000000000000000")},
},
{
input: math.MaxFloat64,
expected: &dynamodb.AttributeValue{N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")},
},
{
input: 12,
expected: &dynamodb.AttributeValue{N: aws.String("12")},
},
{
input: mySimpleStruct{},
expected: &dynamodb.AttributeValue{
M: map[string]*dynamodb.AttributeValue{
"Bool": {BOOL: &falseValue},
"Float32": {N: aws.String("0")},
"Float64": {N: aws.String("0")},
"Int": {N: aws.String("0")},
"Null": {NULL: &trueValue},
"String": {S: aws.String("")},
"Uint": {N: aws.String("0")},
},
},
inputType: "mySimpleStruct",
},
}
var converterMapTestInputs = []converterTestInput{
// Scalar tests
{
input: nil,
err: awserr.New("SerializationError", "in must be a map[string]interface{} or struct, got <nil>", nil),
},
{
input: map[string]interface{}{"string": "some string"},
expected: map[string]*dynamodb.AttributeValue{"string": {S: aws.String("some string")}},
},
{
input: map[string]interface{}{"bool": true},
expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &trueValue}},
},
{
input: map[string]interface{}{"bool": false},
expected: map[string]*dynamodb.AttributeValue{"bool": {BOOL: &falseValue}},
},
{
input: map[string]interface{}{"null": nil},
expected: map[string]*dynamodb.AttributeValue{"null": {NULL: &trueValue}},
},
{
input: map[string]interface{}{"float": 3.14},
expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("3.14")}},
},
{
input: map[string]interface{}{"float": math.MaxFloat32},
expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("340282346638528860000000000000000000000")}},
},
{
input: map[string]interface{}{"float": math.MaxFloat64},
expected: map[string]*dynamodb.AttributeValue{"float": {N: aws.String("179769313486231570000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000")}},
},
{
input: map[string]interface{}{"int": int(12)},
expected: map[string]*dynamodb.AttributeValue{"int": {N: aws.String("12")}},
},
// List
{
input: map[string]interface{}{"list": []interface{}{"a string", 12, 3.14, true, nil, false}},
expected: map[string]*dynamodb.AttributeValue{
"list": {
L: []*dynamodb.AttributeValue{
{S: aws.String("a string")},
{N: aws.String("12")},
{N: aws.String("3.14")},
{BOOL: &trueValue},
{NULL: &trueValue},
{BOOL: &falseValue},
},
},
},
},
// Map
{
input: map[string]interface{}{"map": map[string]interface{}{"nestedint": 12}},
expected: map[string]*dynamodb.AttributeValue{
"map": {
M: map[string]*dynamodb.AttributeValue{
"nestedint": {
N: aws.String("12"),
},
},
},
},
},
// Structs
{
input: mySimpleStruct{},
expected: map[string]*dynamodb.AttributeValue{
"Bool": {BOOL: &falseValue},
"Float32": {N: aws.String("0")},
"Float64": {N: aws.String("0")},
"Int": {N: aws.String("0")},
"Null": {NULL: &trueValue},
"String": {S: aws.String("")},
"Uint": {N: aws.String("0")},
},
inputType: "mySimpleStruct",
},
{
input: myComplexStruct{},
expected: map[string]*dynamodb.AttributeValue{
"Simple": {NULL: &trueValue},
},
inputType: "myComplexStruct",
},
{
input: myComplexStruct{Simple: []mySimpleStruct{{Int: -2}, {Uint: 5}}},
expected: map[string]*dynamodb.AttributeValue{
"Simple": {
L: []*dynamodb.AttributeValue{
{
M: map[string]*dynamodb.AttributeValue{
"Bool": {BOOL: &falseValue},
"Float32": {N: aws.String("0")},
"Float64": {N: aws.String("0")},
"Int": {N: aws.String("-2")},
"Null": {NULL: &trueValue},
"String": {S: aws.String("")},
"Uint": {N: aws.String("0")},
},
},
{
M: map[string]*dynamodb.AttributeValue{
"Bool": {BOOL: &falseValue},
"Float32": {N: aws.String("0")},
"Float64": {N: aws.String("0")},
"Int": {N: aws.String("0")},
"Null": {NULL: &trueValue},
"String": {S: aws.String("")},
"Uint": {N: aws.String("5")},
},
},
},
},
},
inputType: "myComplexStruct",
},
}
var converterListTestInputs = []converterTestInput{
{
input: nil,
err: awserr.New("SerializationError", "in must be an array or slice, got <nil>", nil),
},
{
input: []interface{}{},
expected: []*dynamodb.AttributeValue{},
},
{
input: []interface{}{"a string", 12, 3.14, true, nil, false},
expected: []*dynamodb.AttributeValue{
{S: aws.String("a string")},
{N: aws.String("12")},
{N: aws.String("3.14")},
{BOOL: &trueValue},
{NULL: &trueValue},
{BOOL: &falseValue},
},
},
{
input: []mySimpleStruct{{}},
expected: []*dynamodb.AttributeValue{
{
M: map[string]*dynamodb.AttributeValue{
"Bool": {BOOL: &falseValue},
"Float32": {N: aws.String("0")},
"Float64": {N: aws.String("0")},
"Int": {N: aws.String("0")},
"Null": {NULL: &trueValue},
"String": {S: aws.String("")},
"Uint": {N: aws.String("0")},
},
},
},
inputType: "mySimpleStruct",
},
}
func TestConvertTo(t *testing.T) {
for _, test := range converterScalarInputs {
testConvertTo(t, test)
}
}
func testConvertTo(t *testing.T, test converterTestInput) {
actual, err := ConvertTo(test.input)
if test.err != nil {
if err == nil {
t.Errorf("ConvertTo with input %#v retured %#v, expected error `%s`", test.input, actual, test.err)
} else if err.Error() != test.err.Error() {
t.Errorf("ConvertTo with input %#v retured error `%s`, expected error `%s`", test.input, err, test.err)
}
} else {
if err != nil {
t.Errorf("ConvertTo with input %#v retured error `%s`", test.input, err)
}
compareObjects(t, test.expected, actual)
}
}
func TestConvertFrom(t *testing.T) {
// Using the same inputs from TestConvertTo, test the reverse mapping.
for _, test := range converterScalarInputs {
if test.expected != nil {
testConvertFrom(t, test)
}
}
}
func testConvertFrom(t *testing.T, test converterTestInput) {
switch test.inputType {
case "mySimpleStruct":
var actual mySimpleStruct
if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil {
t.Errorf("ConvertFrom with input %#v retured error `%s`", test.expected, err)
}
compareObjects(t, test.input, actual)
case "myComplexStruct":
var actual myComplexStruct
if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil {
t.Errorf("ConvertFrom with input %#v retured error `%s`", test.expected, err)
}
compareObjects(t, test.input, actual)
default:
var actual interface{}
if err := ConvertFrom(test.expected.(*dynamodb.AttributeValue), &actual); err != nil {
t.Errorf("ConvertFrom with input %#v retured error `%s`", test.expected, err)
}
compareObjects(t, test.input, actual)
}
}
func TestConvertFromError(t *testing.T) {
// Test that we get an error using ConvertFrom to convert to a map.
var actual map[string]interface{}
expected := awserr.New("SerializationError", `v must be a non-nil pointer to an interface{} or struct, got *map[string]interface {}`, nil).Error()
if err := ConvertFrom(nil, &actual); err == nil {
t.Errorf("ConvertFrom with input %#v returned no error, expected error `%s`", nil, expected)
} else if err.Error() != expected {
t.Errorf("ConvertFrom with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
}
// Test that we get an error using ConvertFrom to convert to a list.
var actual2 []interface{}
expected = awserr.New("SerializationError", `v must be a non-nil pointer to an interface{} or struct, got *[]interface {}`, nil).Error()
if err := ConvertFrom(nil, &actual2); err == nil {
t.Errorf("ConvertFrom with input %#v returned no error, expected error `%s`", nil, expected)
} else if err.Error() != expected {
t.Errorf("ConvertFrom with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
}
}
func TestConvertToMap(t *testing.T) {
for _, test := range converterMapTestInputs {
testConvertToMap(t, test)
}
}
func testConvertToMap(t *testing.T, test converterTestInput) {
actual, err := ConvertToMap(test.input)
if test.err != nil {
if err == nil {
t.Errorf("ConvertToMap with input %#v retured %#v, expected error `%s`", test.input, actual, test.err)
} else if err.Error() != test.err.Error() {
t.Errorf("ConvertToMap with input %#v retured error `%s`, expected error `%s`", test.input, err, test.err)
}
} else {
if err != nil {
t.Errorf("ConvertToMap with input %#v retured error `%s`", test.input, err)
}
compareObjects(t, test.expected, actual)
}
}
func TestConvertFromMap(t *testing.T) {
// Using the same inputs from TestConvertToMap, test the reverse mapping.
for _, test := range converterMapTestInputs {
if test.expected != nil {
testConvertFromMap(t, test)
}
}
}
func testConvertFromMap(t *testing.T, test converterTestInput) {
switch test.inputType {
case "mySimpleStruct":
var actual mySimpleStruct
if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil {
t.Errorf("ConvertFromMap with input %#v retured error `%s`", test.expected, err)
}
compareObjects(t, test.input, actual)
case "myComplexStruct":
var actual myComplexStruct
if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil {
t.Errorf("ConvertFromMap with input %#v retured error `%s`", test.expected, err)
}
compareObjects(t, test.input, actual)
default:
var actual map[string]interface{}
if err := ConvertFromMap(test.expected.(map[string]*dynamodb.AttributeValue), &actual); err != nil {
t.Errorf("ConvertFromMap with input %#v retured error `%s`", test.expected, err)
}
compareObjects(t, test.input, actual)
}
}
func TestConvertFromMapError(t *testing.T) {
// Test that we get an error using ConvertFromMap to convert to an interface{}.
var actual interface{}
expected := awserr.New("SerializationError", `v must be a non-nil pointer to a map[string]interface{} or struct, got *interface {}`, nil).Error()
if err := ConvertFromMap(nil, &actual); err == nil {
t.Errorf("ConvertFromMap with input %#v returned no error, expected error `%s`", nil, expected)
} else if err.Error() != expected {
t.Errorf("ConvertFromMap with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
}
// Test that we get an error using ConvertFromMap to convert to a slice.
var actual2 []interface{}
expected = awserr.New("SerializationError", `v must be a non-nil pointer to a map[string]interface{} or struct, got *[]interface {}`, nil).Error()
if err := ConvertFromMap(nil, &actual2); err == nil {
t.Errorf("ConvertFromMap with input %#v returned no error, expected error `%s`", nil, expected)
} else if err.Error() != expected {
t.Errorf("ConvertFromMap with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
}
}
func TestConvertToList(t *testing.T) {
for _, test := range converterListTestInputs {
testConvertToList(t, test)
}
}
func testConvertToList(t *testing.T, test converterTestInput) {
actual, err := ConvertToList(test.input)
if test.err != nil {
if err == nil {
t.Errorf("ConvertToList with input %#v retured %#v, expected error `%s`", test.input, actual, test.err)
} else if err.Error() != test.err.Error() {
t.Errorf("ConvertToList with input %#v retured error `%s`, expected error `%s`", test.input, err, test.err)
}
} else {
if err != nil {
t.Errorf("ConvertToList with input %#v retured error `%s`", test.input, err)
}
compareObjects(t, test.expected, actual)
}
}
func TestConvertFromList(t *testing.T) {
// Using the same inputs from TestConvertToList, test the reverse mapping.
for _, test := range converterListTestInputs {
if test.expected != nil {
testConvertFromList(t, test)
}
}
}
func testConvertFromList(t *testing.T, test converterTestInput) {
switch test.inputType {
case "mySimpleStruct":
var actual []mySimpleStruct
if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil {
t.Errorf("ConvertFromList with input %#v retured error `%s`", test.expected, err)
}
compareObjects(t, test.input, actual)
case "myComplexStruct":
var actual []myComplexStruct
if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil {
t.Errorf("ConvertFromList with input %#v retured error `%s`", test.expected, err)
}
compareObjects(t, test.input, actual)
default:
var actual []interface{}
if err := ConvertFromList(test.expected.([]*dynamodb.AttributeValue), &actual); err != nil {
t.Errorf("ConvertFromList with input %#v retured error `%s`", test.expected, err)
}
compareObjects(t, test.input, actual)
}
}
func TestConvertFromListError(t *testing.T) {
// Test that we get an error using ConvertFromList to convert to a map.
var actual map[string]interface{}
expected := awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *map[string]interface {}`, nil).Error()
if err := ConvertFromList(nil, &actual); err == nil {
t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected)
} else if err.Error() != expected {
t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
}
// Test that we get an error using ConvertFromList to convert to a struct.
var actual2 myComplexStruct
expected = awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *dynamodbattribute.myComplexStruct`, nil).Error()
if err := ConvertFromList(nil, &actual2); err == nil {
t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected)
} else if err.Error() != expected {
t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
}
// Test that we get an error using ConvertFromList to convert to an interface{}.
var actual3 interface{}
expected = awserr.New("SerializationError", `v must be a non-nil pointer to an array or slice, got *interface {}`, nil).Error()
if err := ConvertFromList(nil, &actual3); err == nil {
t.Errorf("ConvertFromList with input %#v returned no error, expected error `%s`", nil, expected)
} else if err.Error() != expected {
t.Errorf("ConvertFromList with input %#v returned error `%s`, expected error `%s`", nil, err, expected)
}
}
func compareObjects(t *testing.T, expected interface{}, actual interface{}) {
if !reflect.DeepEqual(expected, actual) {
t.Errorf("\nExpected %s:\n%s\nActual %s:\n%s\n",
reflect.ValueOf(expected).Kind(),
awsutil.Prettify(expected),
reflect.ValueOf(actual).Kind(),
awsutil.Prettify(actual))
}
}
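
The tables above exercise ConvertTo/ConvertFrom together with the Map and List variants. As a rough sketch of what ConvertToMap and ConvertFromMap look like in calling code (the `record` type and its values are invented for illustration; only the converter functions themselves come from this diff):

```go
package main

import (
	"fmt"
	"log"

	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
)

type record struct {
	ID   string
	Size int
}

func main() {
	// Marshal a struct into the map[string]*dynamodb.AttributeValue form
	// that PutItem/GetItem work with.
	item, err := dynamodbattribute.ConvertToMap(record{ID: "abc", Size: 42})
	if err != nil {
		log.Fatal(err)
	}

	// item could now be placed in a dynamodb.PutItemInput{Item: item}; here
	// we simply round-trip it back into a struct with ConvertFromMap.
	var out record
	if err := dynamodbattribute.ConvertFromMap(item, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.ID, out.Size) // abc 42
}
```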

View File

@@ -1,79 +0,0 @@
package dynamodbattribute_test
import (
"fmt"
"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute"
"reflect"
)
func ExampleConvertTo() {
type Record struct {
MyField string
Letters []string
Numbers []int
}
r := Record{
MyField: "MyFieldValue",
Letters: []string{"a", "b", "c", "d"},
Numbers: []int{1, 2, 3},
}
av, err := dynamodbattribute.ConvertTo(r)
fmt.Println("err", err)
fmt.Println("MyField", av.M["MyField"])
fmt.Println("Letters", av.M["Letters"])
fmt.Println("Numbers", av.M["Numbers"])
// Output:
// err <nil>
// MyField {
// S: "MyFieldValue"
// }
// Letters {
// L: [
// {
// S: "a"
// },
// {
// S: "b"
// },
// {
// S: "c"
// },
// {
// S: "d"
// }
// ]
// }
// Numbers {
// L: [{
// N: "1"
// },{
// N: "2"
// },{
// N: "3"
// }]
// }
}
func ExampleConvertFrom() {
type Record struct {
MyField string
Letters []string
A2Num map[string]int
}
r := Record{
MyField: "MyFieldValue",
Letters: []string{"a", "b", "c", "d"},
A2Num: map[string]int{"a": 1, "b": 2, "c": 3},
}
av, err := dynamodbattribute.ConvertTo(r)
r2 := Record{}
err = dynamodbattribute.ConvertFrom(av, &r2)
fmt.Println(err, reflect.DeepEqual(r, r2))
// Output:
// <nil> true
}

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -138,6 +138,10 @@ type EC2API interface {
CreateKeyPair(*ec2.CreateKeyPairInput) (*ec2.CreateKeyPairOutput, error)
CreateNatGatewayRequest(*ec2.CreateNatGatewayInput) (*request.Request, *ec2.CreateNatGatewayOutput)
CreateNatGateway(*ec2.CreateNatGatewayInput) (*ec2.CreateNatGatewayOutput, error)
CreateNetworkAclRequest(*ec2.CreateNetworkAclInput) (*request.Request, *ec2.CreateNetworkAclOutput)
CreateNetworkAcl(*ec2.CreateNetworkAclInput) (*ec2.CreateNetworkAclOutput, error)
@@ -234,6 +238,10 @@ type EC2API interface {
DeleteKeyPair(*ec2.DeleteKeyPairInput) (*ec2.DeleteKeyPairOutput, error)
DeleteNatGatewayRequest(*ec2.DeleteNatGatewayInput) (*request.Request, *ec2.DeleteNatGatewayOutput)
DeleteNatGateway(*ec2.DeleteNatGatewayInput) (*ec2.DeleteNatGatewayOutput, error)
DeleteNetworkAclRequest(*ec2.DeleteNetworkAclInput) (*request.Request, *ec2.DeleteNetworkAclOutput)
DeleteNetworkAcl(*ec2.DeleteNetworkAclInput) (*ec2.DeleteNetworkAclOutput, error)
@@ -402,6 +410,10 @@ type EC2API interface {
DescribeMovingAddresses(*ec2.DescribeMovingAddressesInput) (*ec2.DescribeMovingAddressesOutput, error)
DescribeNatGatewaysRequest(*ec2.DescribeNatGatewaysInput) (*request.Request, *ec2.DescribeNatGatewaysOutput)
DescribeNatGateways(*ec2.DescribeNatGatewaysInput) (*ec2.DescribeNatGatewaysOutput, error)
DescribeNetworkAclsRequest(*ec2.DescribeNetworkAclsInput) (*request.Request, *ec2.DescribeNetworkAclsOutput)
DescribeNetworkAcls(*ec2.DescribeNetworkAclsInput) (*ec2.DescribeNetworkAclsOutput, error)
@@ -524,6 +536,10 @@ type EC2API interface {
DescribeVpcClassicLink(*ec2.DescribeVpcClassicLinkInput) (*ec2.DescribeVpcClassicLinkOutput, error)
DescribeVpcClassicLinkDnsSupportRequest(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DescribeVpcClassicLinkDnsSupportOutput)
DescribeVpcClassicLinkDnsSupport(*ec2.DescribeVpcClassicLinkDnsSupportInput) (*ec2.DescribeVpcClassicLinkDnsSupportOutput, error)
DescribeVpcEndpointServicesRequest(*ec2.DescribeVpcEndpointServicesInput) (*request.Request, *ec2.DescribeVpcEndpointServicesOutput)
DescribeVpcEndpointServices(*ec2.DescribeVpcEndpointServicesInput) (*ec2.DescribeVpcEndpointServicesOutput, error)
@@ -576,6 +592,10 @@ type EC2API interface {
DisableVpcClassicLink(*ec2.DisableVpcClassicLinkInput) (*ec2.DisableVpcClassicLinkOutput, error)
DisableVpcClassicLinkDnsSupportRequest(*ec2.DisableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.DisableVpcClassicLinkDnsSupportOutput)
DisableVpcClassicLinkDnsSupport(*ec2.DisableVpcClassicLinkDnsSupportInput) (*ec2.DisableVpcClassicLinkDnsSupportOutput, error)
DisassociateAddressRequest(*ec2.DisassociateAddressInput) (*request.Request, *ec2.DisassociateAddressOutput)
DisassociateAddress(*ec2.DisassociateAddressInput) (*ec2.DisassociateAddressOutput, error)
@@ -596,6 +616,10 @@ type EC2API interface {
EnableVpcClassicLink(*ec2.EnableVpcClassicLinkInput) (*ec2.EnableVpcClassicLinkOutput, error)
EnableVpcClassicLinkDnsSupportRequest(*ec2.EnableVpcClassicLinkDnsSupportInput) (*request.Request, *ec2.EnableVpcClassicLinkDnsSupportOutput)
EnableVpcClassicLinkDnsSupport(*ec2.EnableVpcClassicLinkDnsSupportInput) (*ec2.EnableVpcClassicLinkDnsSupportOutput, error)
GetConsoleOutputRequest(*ec2.GetConsoleOutputInput) (*request.Request, *ec2.GetConsoleOutputOutput)
GetConsoleOutput(*ec2.GetConsoleOutputInput) (*ec2.GetConsoleOutputOutput, error)

View File

@@ -1180,6 +1180,11 @@ func (c *IAM) DeleteServerCertificateRequest(input *DeleteServerCertificateInput
// Deletes the specified server certificate.
//
// For more information about working with server certificates, including a
// list of AWS services that can use the server certificates that you manage
// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html)
// in the IAM User Guide.
//
// If you are using a server certificate with Elastic Load Balancing, deleting
// the certificate could have implications for your application. If Elastic
// Load Balancing doesn't detect the deletion of bound certificates, it may
@@ -2114,6 +2119,11 @@ func (c *IAM) GetServerCertificateRequest(input *GetServerCertificateInput) (req
}
// Retrieves information about the specified server certificate.
//
// For more information about working with server certificates, including a
// list of AWS services that can use the server certificates that you manage
// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html)
// in the IAM User Guide.
func (c *IAM) GetServerCertificate(input *GetServerCertificateInput) (*GetServerCertificateOutput, error) {
req, out := c.GetServerCertificateRequest(input)
err := req.Send()
@@ -3069,6 +3079,11 @@ func (c *IAM) ListServerCertificatesRequest(input *ListServerCertificatesInput)
// exist, the action returns an empty list.
//
// You can paginate the results using the MaxItems and Marker parameters.
//
// For more information about working with server certificates, including a
// list of AWS services that can use the server certificates that you manage
// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html)
// in the IAM User Guide.
func (c *IAM) ListServerCertificates(input *ListServerCertificatesInput) (*ListServerCertificatesOutput, error) {
req, out := c.ListServerCertificatesRequest(input)
err := req.Send()
@@ -3963,15 +3978,20 @@ func (c *IAM) UpdateServerCertificateRequest(input *UpdateServerCertificateInput
// Updates the name and/or the path of the specified server certificate.
//
// You should understand the implications of changing a server certificate's
// path or name. For more information, see Managing Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/ManagingServerCerts.html)
// in the IAM User Guide. To change a server certificate name the requester
// For more information about working with server certificates, including a
// list of AWS services that can use the server certificates that you manage
// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html)
// in the IAM User Guide.
//
// You should understand the implications of changing a server certificate's
// path or name. For more information, see Renaming a Server Certificate (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs_manage.html#RenamingServerCerts)
// in the IAM User Guide. To change a server certificate name the requester
// must have appropriate permissions on both the source object and the target
// object. For example, to change the name from ProductionCert to ProdCert,
// the entity making the request must have permission on ProductionCert and
// ProdCert, or must have permission on all (*). For more information about
// permissions, see Permissions and Policies (http://docs.aws.amazon.com/IAM/latest/UserGuide/PermissionsAndPolicies.html"
// target="blank).
// permissions, see Access Management (http://docs.aws.amazon.com/IAM/latest/UserGuide/access.html)
// in the IAM User Guide.
func (c *IAM) UpdateServerCertificate(input *UpdateServerCertificateInput) (*UpdateServerCertificateOutput, error) {
req, out := c.UpdateServerCertificateRequest(input)
err := req.Send()
@@ -4105,8 +4125,13 @@ func (c *IAM) UploadServerCertificateRequest(input *UploadServerCertificateInput
// entity includes a public key certificate, a private key, and an optional
// certificate chain, which should all be PEM-encoded.
//
// For more information about working with server certificates, including a
// list of AWS services that can use the server certificates that you manage
// with IAM, go to Working with Server Certificates (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_server-certs.html)
// in the IAM User Guide.
//
// For information about the number of server certificates you can upload,
// see Limitations on IAM Entities (http://docs.aws.amazon.com/IAM/latest/UserGuide/LimitationsOnEntities.html)
// see Limitations on IAM Entities and Objects (http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_iam-limits.html)
// in the IAM User Guide.
//
// Because the body of the public key certificate, private key, and the certificate
@@ -4114,7 +4139,7 @@ func (c *IAM) UploadServerCertificateRequest(input *UploadServerCertificateInput
// For information about setting up signatures and authorization through the
// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// in the AWS General Reference. For general information about using the Query
// API with IAM, go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
// API with IAM, go to Calling the API by Making HTTP Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/programming.html)
// in the IAM User Guide.
func (c *IAM) UploadServerCertificate(input *UploadServerCertificateInput) (*UploadServerCertificateOutput, error) {
req, out := c.UploadServerCertificateRequest(input)

View File

@@ -21,7 +21,7 @@ func init() {
initRequest = func(r *request.Request) {
switch r.Operation.Name {
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects:
case opPutBucketCors, opPutBucketLifecycle, opPutBucketPolicy, opPutBucketTagging, opDeleteObjects, opPutBucketLifecycleConfiguration:
// These S3 operations require Content-MD5 to be set
r.Handlers.Build.PushBack(contentMD5)
case opGetBucketLocation:

View File

@@ -0,0 +1,23 @@
// Package s3manageriface provides an interface for the s3manager package
package s3manageriface
import (
"io"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// DownloaderAPI is the interface type for s3manager.Downloader.
type DownloaderAPI interface {
Download(io.WriterAt, *s3.GetObjectInput, ...func(*s3manager.Downloader)) (int64, error)
}
var _ DownloaderAPI = (*s3manager.Downloader)(nil)
// UploaderAPI is the interface type for s3manager.Uploader.
type UploaderAPI interface {
Upload(*s3manager.UploadInput, ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error)
}
var _ UploaderAPI = (*s3manager.Uploader)(nil)
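
Because DownloaderAPI and UploaderAPI only describe behaviour, their main use is letting callers accept either the real s3manager types or a test double. A minimal sketch under that assumption (fakeUploader, the bucket, and the key are invented for illustration, and the conventional import path is assumed):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3/s3manager"
	"github.com/aws/aws-sdk-go/service/s3/s3manager/s3manageriface"
)

// fakeUploader is a hand-rolled test double satisfying UploaderAPI.
type fakeUploader struct{}

func (fakeUploader) Upload(in *s3manager.UploadInput, _ ...func(*s3manager.Uploader)) (*s3manager.UploadOutput, error) {
	return &s3manager.UploadOutput{Location: "fake://" + aws.StringValue(in.Key)}, nil
}

// store depends only on the interface, so production code can pass a real
// *s3manager.Uploader while tests pass fakeUploader.
func store(u s3manageriface.UploaderAPI, key, body string) (string, error) {
	out, err := u.Upload(&s3manager.UploadInput{
		Bucket: aws.String("example-bucket"),
		Key:    aws.String(key),
		Body:   strings.NewReader(body),
	})
	if err != nil {
		return "", err
	}
	return out.Location, nil
}

func main() {
	loc, _ := store(fakeUploader{}, "greeting.txt", "hello")
	fmt.Println(loc)
}
```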

View File

@@ -22,17 +22,23 @@ func unmarshalError(r *request.Request) {
defer r.HTTPResponse.Body.Close()
if r.HTTPResponse.StatusCode == http.StatusMovedPermanently {
r.Error = awserr.New("BucketRegionError",
fmt.Sprintf("incorrect region, the bucket is not in '%s' region", aws.StringValue(r.Config.Region)), nil)
r.Error = awserr.NewRequestFailure(
awserr.New("BucketRegionError",
fmt.Sprintf("incorrect region, the bucket is not in '%s' region",
aws.StringValue(r.Config.Region)),
nil),
r.HTTPResponse.StatusCode,
r.RequestID,
)
return
}
if r.HTTPResponse.ContentLength == int64(0) {
if r.HTTPResponse.ContentLength <= 1 {
// No body, use status code to generate an awserr.Error
r.Error = awserr.NewRequestFailure(
awserr.New(strings.Replace(r.HTTPResponse.Status, " ", "", -1), r.HTTPResponse.Status, nil),
r.HTTPResponse.StatusCode,
"",
r.RequestID,
)
return
}
@@ -45,7 +51,7 @@ func unmarshalError(r *request.Request) {
r.Error = awserr.NewRequestFailure(
awserr.New(resp.Code, resp.Message, nil),
r.HTTPResponse.StatusCode,
"",
r.RequestID,
)
}
}

View File

@@ -0,0 +1,2 @@
example/example
example/example.exe

View File

@@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [2013] [the CloudFoundry Authors]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@@ -0,0 +1,30 @@
# Speakeasy
This package provides cross-platform Go (#golang) helpers for taking user input
from the terminal while not echoing the input back (similar to `getpasswd`). The
package uses syscalls to avoid any dependence on cgo, and is therefore
compatible with cross-compiling.
[![GoDoc](https://godoc.org/github.com/bgentry/speakeasy?status.png)][godoc]
## Unicode
Multi-byte unicode characters work successfully on Mac OS X. On Windows,
however, this may be problematic (as is UTF in general on Windows). Other
platforms have not been tested.
## License
The code herein was not written by me, but was compiled from two separate open
source packages. Unix portions were imported from [gopass][gopass], while
Windows portions were imported from the [CloudFoundry Go CLI][cf-cli]'s
[Windows terminal helpers][cf-ui-windows].
The [license for the windows portion](./LICENSE_WINDOWS) has been copied exactly
from the source (though I attempted to fill in the correct owner in the
boilerplate copyright notice).
[cf-cli]: https://github.com/cloudfoundry/cli "CloudFoundry Go CLI"
[cf-ui-windows]: https://github.com/cloudfoundry/cli/blob/master/src/cf/terminal/ui_windows.go "CloudFoundry Go CLI Windows input helpers"
[godoc]: https://godoc.org/github.com/bgentry/speakeasy "speakeasy on Godoc.org"
[gopass]: https://code.google.com/p/gopass "gopass"

View File

@@ -0,0 +1,18 @@
package main
import (
"fmt"
"os"
"github.com/bgentry/speakeasy"
)
func main() {
password, err := speakeasy.Ask("Please enter a password: ")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
fmt.Printf("Password result: %q\n", password)
fmt.Printf("Password len: %d\n", len(password))
}

View File

@@ -0,0 +1,47 @@
package speakeasy
import (
"fmt"
"io"
"os"
"strings"
)
// Ask the user to enter a password with input hidden. prompt is a string to
// display before the user's input. Returns the provided password, or an error
// if the command failed.
func Ask(prompt string) (password string, err error) {
return FAsk(os.Stdout, prompt)
}
// Same as the Ask function, except it is possible to specify the file to write
// the prompt to.
func FAsk(file *os.File, prompt string) (password string, err error) {
if prompt != "" {
fmt.Fprint(file, prompt) // Display the prompt.
}
password, err = getPassword()
// Carriage return after the user input.
fmt.Fprintln(file, "")
return
}
func readline() (value string, err error) {
var valb []byte
var n int
b := make([]byte, 1)
for {
// read one byte at a time so we don't accidentally read extra bytes
n, err = os.Stdin.Read(b)
if err != nil && err != io.EOF {
return "", err
}
if n == 0 || b[0] == '\n' {
break
}
valb = append(valb, b[0])
}
return strings.TrimSuffix(string(valb), "\r"), nil
}
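
Ask always prints the prompt to os.Stdout, while FAsk lets the caller choose the destination. A small sketch, assuming the common case of prompting on stderr so that piped stdout stays clean (the prompt text is arbitrary):

```go
package main

import (
	"fmt"
	"os"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Prompt on stderr so that `myprog > out.txt` does not capture the prompt.
	token, err := speakeasy.FAsk(os.Stderr, "Token: ")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("token length:", len(token)) // only derived output goes to stdout
}
```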

View File

@@ -0,0 +1,93 @@
// based on https://code.google.com/p/gopass
// Author: johnsiilver@gmail.com (John Doak)
//
// Original code is based on code by RogerV in the golang-nuts thread:
// https://groups.google.com/group/golang-nuts/browse_thread/thread/40cc41e9d9fc9247
// +build darwin freebsd linux netbsd openbsd solaris
package speakeasy
import (
"fmt"
"os"
"os/signal"
"strings"
"syscall"
)
const sttyArg0 = "/bin/stty"
var (
sttyArgvEOff = []string{"stty", "-echo"}
sttyArgvEOn = []string{"stty", "echo"}
)
// getPassword gets input hidden from the terminal from a user. This is
// accomplished by turning off terminal echo, reading input from the user and
// finally turning on terminal echo.
func getPassword() (password string, err error) {
sig := make(chan os.Signal, 10)
brk := make(chan bool)
// File descriptors for stdin, stdout, and stderr.
fd := []uintptr{os.Stdin.Fd(), os.Stdout.Fd(), os.Stderr.Fd()}
// Set up notification of termination signals on channel sig, and start a goroutine to
// watch for these signals so we can turn echo back on if need be.
signal.Notify(sig, syscall.SIGHUP, syscall.SIGINT, syscall.SIGKILL, syscall.SIGQUIT,
syscall.SIGTERM)
go catchSignal(fd, sig, brk)
// Turn off the terminal echo.
pid, err := echoOff(fd)
if err != nil {
return "", err
}
// Turn on the terminal echo and stop listening for signals.
defer signal.Stop(sig)
defer close(brk)
defer echoOn(fd)
syscall.Wait4(pid, nil, 0, nil)
line, err := readline()
if err == nil {
password = strings.TrimSpace(line)
} else {
err = fmt.Errorf("failed during password entry: %s", err)
}
return password, err
}
// echoOff turns off the terminal echo.
func echoOff(fd []uintptr) (int, error) {
pid, err := syscall.ForkExec(sttyArg0, sttyArgvEOff, &syscall.ProcAttr{Dir: "", Files: fd})
if err != nil {
return 0, fmt.Errorf("failed turning off console echo for password entry:\n\t%s", err)
}
return pid, nil
}
// echoOn turns back on the terminal echo.
func echoOn(fd []uintptr) {
// Turn on the terminal echo.
pid, e := syscall.ForkExec(sttyArg0, sttyArgvEOn, &syscall.ProcAttr{Dir: "", Files: fd})
if e == nil {
syscall.Wait4(pid, nil, 0, nil)
}
}
// catchSignal tries to catch SIGKILL, SIGQUIT and SIGINT so that we can turn
// terminal echo back on before the program ends. Otherwise the user is left
// with echo off on their terminal.
func catchSignal(fd []uintptr, sig chan os.Signal, brk chan bool) {
select {
case <-sig:
echoOn(fd)
os.Exit(-1)
case <-brk:
}
}

View File

@@ -0,0 +1,43 @@
// +build windows
package speakeasy
import (
"os"
"syscall"
)
// SetConsoleMode function can be used to change value of ENABLE_ECHO_INPUT:
// http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
const ENABLE_ECHO_INPUT = 0x0004
func getPassword() (password string, err error) {
hStdin := syscall.Handle(os.Stdin.Fd())
var oldMode uint32
err = syscall.GetConsoleMode(hStdin, &oldMode)
if err != nil {
return
}
var newMode uint32 = (oldMode &^ ENABLE_ECHO_INPUT)
err = setConsoleMode(hStdin, newMode)
defer setConsoleMode(hStdin, oldMode)
if err != nil {
return
}
return readline()
}
func setConsoleMode(console syscall.Handle, mode uint32) (err error) {
dll := syscall.MustLoadDLL("kernel32")
proc := dll.MustFindProc("SetConsoleMode")
r, _, err := proc.Call(uintptr(console), uintptr(mode))
if r == 0 {
return err
}
return nil
}

View File

@@ -77,7 +77,7 @@ If the response gets from the cluster is invalid, a plain string error will be r
Here is the example code to handle client errors:
```go
cfg := client.Config{Endpoints: []string{"http://etcd1:2379,http://etcd2:2379,http://etcd3:2379"}}
cfg := client.Config{Endpoints: []string{"http://etcd1:2379","http://etcd2:2379","http://etcd3:2379"}}
c, err := client.New(cfg)
if err != nil {
log.Fatal(err)

View File

@@ -115,14 +115,13 @@ func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) {
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var userList struct {
Roles []string `json:"roles"`
}
err = json.Unmarshal(body, &userList)
if err != nil {
if err = json.Unmarshal(body, &userList); err != nil {
return nil, err
}
return userList.Roles, nil
@@ -218,17 +217,16 @@ func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var role Role
err = json.Unmarshal(body, &role)
if err != nil {
if err = json.Unmarshal(body, &role); err != nil {
return nil, err
}
return &role, nil

View File

@@ -78,9 +78,9 @@ func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error {
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
@@ -179,9 +179,9 @@ func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
@@ -190,8 +190,7 @@ func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) {
var userList struct {
Users []string `json:"users"`
}
err = json.Unmarshal(body, &userList)
if err != nil {
if err = json.Unmarshal(body, &userList); err != nil {
return nil, err
}
return userList.Users, nil
@@ -221,9 +220,9 @@ func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAct
if err != nil {
return err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return err
}
@@ -280,17 +279,16 @@ func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
var sec authError
err := json.Unmarshal(body, &sec)
err = json.Unmarshal(body, &sec)
if err != nil {
return nil, err
}
return nil, sec
}
var user User
err = json.Unmarshal(body, &user)
if err != nil {
if err = json.Unmarshal(body, &user); err != nil {
return nil, err
}
return &user, nil

View File

@@ -34,6 +34,7 @@ var (
ErrNoEndpoints = errors.New("client: no endpoints available")
ErrTooManyRedirects = errors.New("client: too many redirects")
ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured")
ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available")
errTooManyRedirectChecks = errors.New("client: too many redirect checks")
)
@@ -48,6 +49,19 @@ var DefaultTransport CancelableTransport = &http.Transport{
TLSHandshakeTimeout: 10 * time.Second,
}
type EndpointSelectionMode int
const (
// EndpointSelectionRandom is to pick an endpoint in a random manner.
EndpointSelectionRandom EndpointSelectionMode = iota
// EndpointSelectionPrioritizeLeader prioritizes the leader endpoint, reducing needless
// forwarding between followers and the leader.
//
// This mode should be used with Client.AutoSync().
EndpointSelectionPrioritizeLeader
)
type Config struct {
// Endpoints defines a set of URLs (schemes, hosts and ports only)
// that can be used to communicate with a logical etcd cluster. For
@@ -104,6 +118,9 @@ type Config struct {
//
// A HeaderTimeoutPerRequest of zero means no timeout.
HeaderTimeoutPerRequest time.Duration
// SelectionMode specifies a way of selecting destination endpoint.
SelectionMode EndpointSelectionMode
}
func (cfg *Config) transport() CancelableTransport {
@@ -162,6 +179,11 @@ type Client interface {
// this may differ from the initial Endpoints provided in the Config.
Endpoints() []string
// SetEndpoints sets the set of API endpoints used by Client to resolve
// HTTP requests. If the given endpoints are not valid, an error will be
// returned
SetEndpoints(eps []string) error
httpClient
}
@@ -169,6 +191,7 @@ func New(cfg Config) (Client, error) {
c := &httpClusterClient{
clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest),
rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))),
selectionMode: cfg.SelectionMode,
}
if cfg.Username != "" {
c.credentials = &credentials{
@@ -176,7 +199,7 @@ func New(cfg Config) (Client, error) {
password: cfg.Password,
}
}
if err := c.reset(cfg.Endpoints); err != nil {
if err := c.SetEndpoints(cfg.Endpoints); err != nil {
return nil, err
}
return c, nil
@@ -216,10 +239,21 @@ type httpClusterClient struct {
pinned int
credentials *credentials
sync.RWMutex
rand *rand.Rand
rand *rand.Rand
selectionMode EndpointSelectionMode
}
func (c *httpClusterClient) reset(eps []string) error {
func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
mAPI := NewMembersAPI(c)
leader, err := mAPI.Leader(context.Background())
if err != nil {
return "", err
}
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
}
func (c *httpClusterClient) SetEndpoints(eps []string) error {
if len(eps) == 0 {
return ErrNoEndpoints
}
@@ -233,9 +267,28 @@ func (c *httpClusterClient) reset(eps []string) error {
neps[i] = *u
}
c.endpoints = shuffleEndpoints(c.rand, neps)
// TODO: pin old endpoint if possible, and rebalance when new endpoint appears
c.pinned = 0
switch c.selectionMode {
case EndpointSelectionRandom:
c.endpoints = shuffleEndpoints(c.rand, neps)
c.pinned = 0
case EndpointSelectionPrioritizeLeader:
c.endpoints = neps
lep, err := c.getLeaderEndpoint()
if err != nil {
return ErrNoLeaderEndpoint
}
for i := range c.endpoints {
if c.endpoints[i].String() == lep {
c.pinned = i
break
}
}
// If the endpoints list doesn't contain the leader endpoint, just keep c.pinned = 0.
// Forwarding between follower and leader would be required but it works.
default:
return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode))
}
return nil
}
@@ -341,7 +394,7 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
return nil
}
return c.reset(eps)
return c.SetEndpoints(eps)
}
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
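
As the comments above note, EndpointSelectionPrioritizeLeader is meant to be paired with Client.AutoSync so the pinned endpoint keeps tracking the leader. A sketch of that wiring, assuming the package lives at github.com/coreos/etcd/client and using placeholder endpoint URLs and a placeholder sync interval:

```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	cfg := client.Config{
		Endpoints:               []string{"http://etcd1:2379", "http://etcd2:2379", "http://etcd3:2379"},
		HeaderTimeoutPerRequest: time.Second,
		// Pin the leader endpoint rather than a randomly chosen one.
		SelectionMode: client.EndpointSelectionPrioritizeLeader,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}

	// Keep the member list, and therefore the pinned leader, up to date,
	// as the SelectionMode comment recommends.
	go func() {
		if err := c.AutoSync(context.Background(), 10*time.Second); err != nil {
			log.Println("AutoSync:", err)
		}
	}()

	// ... use client.NewKeysAPI(c) and friends as usual ...
}
```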

View File

@@ -29,6 +29,7 @@ import (
var (
defaultV2MembersPrefix = "/v2/members"
defaultLeaderSuffix = "/leader"
)
type Member struct {
@@ -105,6 +106,9 @@ type MembersAPI interface {
// Update instructs etcd to update an existing Member in the cluster.
Update(ctx context.Context, mID string, peerURLs []string) error
// Leader gets current leader of the cluster
Leader(ctx context.Context) (*Member, error)
}
type httpMembersAPI struct {
@@ -199,6 +203,25 @@ func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error {
return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone)
}
func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) {
req := &membersAPIActionLeader{}
resp, body, err := m.client.Do(ctx, req)
if err != nil {
return nil, err
}
if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil {
return nil, err
}
var leader Member
if err := json.Unmarshal(body, &leader); err != nil {
return nil, err
}
return &leader, nil
}
type membersAPIActionList struct{}
func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request {
@@ -255,6 +278,15 @@ func assertStatusCode(got int, want ...int) (err error) {
return fmt.Errorf("unexpected status code %d", got)
}
type membersAPIActionLeader struct{}
func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request {
u := v2MembersURL(ep)
u.Path = path.Join(u.Path, defaultLeaderSuffix)
req, _ := http.NewRequest("GET", u.String(), nil)
return req
}
// v2MembersURL adds the necessary path to the provided endpoint
// to route requests to the default v2 members API.
func v2MembersURL(ep url.URL) *url.URL {
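
The new Leader call is what getLeaderEndpoint builds on, and it can also be used directly. A sketch assuming an already-configured client.Client and the usual Member fields (Name, ClientURLs):

```go
package etcdutil

import (
	"fmt"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

// printLeader reports which member currently leads the cluster, using the
// Leader call added above; c is any already-configured client.Client.
func printLeader(c client.Client) error {
	mapi := client.NewMembersAPI(c)
	leader, err := mapi.Leader(context.Background())
	if err != nil {
		return err
	}
	fmt.Printf("leader: %s %v\n", leader.Name, leader.ClientURLs)
	return nil
}
```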

View File

@@ -0,0 +1,23 @@
// Copyright 2016 CoreOS, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package client
// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound.
func IsKeyNotFound(err error) bool {
if cErr, ok := err.(Error); ok {
return cErr.Code == ErrorCodeKeyNotFound
}
return false
}
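
IsKeyNotFound is the intended way to tell a missing key apart from a real failure. A sketch assuming the v2 KeysAPI (the key path and fallback behaviour are illustrative):

```go
package etcdutil

import (
	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

// lookup returns the value stored at key, or the given fallback if the key
// simply does not exist, while still surfacing real failures.
func lookup(c client.Client, key, fallback string) (string, error) {
	kapi := client.NewKeysAPI(c)
	resp, err := kapi.Get(context.Background(), key, nil)
	if client.IsKeyNotFound(err) {
		return fallback, nil
	}
	if err != nil {
		return "", err
	}
	return resp.Node.Value, nil
}
```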

View File

@@ -21,17 +21,19 @@ import (
"time"
)
// NewKeepAliveListener returns a listener that listens on the given address.
// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
func NewKeepAliveListener(addr string, scheme string, info TLSInfo) (net.Listener, error) {
l, err := net.Listen("tcp", addr)
if err != nil {
return nil, err
}
type keepAliveConn interface {
SetKeepAlive(bool) error
SetKeepAlivePeriod(d time.Duration) error
}
// NewKeepAliveListener returns a listener that listens on the given address.
// Be careful when wrapping KeepAliveListener with another Listener if TLSInfo is not nil.
// Some pkgs (like go/http) might expect Listener to return TLSConn type to start TLS handshake.
// http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
func NewKeepAliveListener(l net.Listener, scheme string, info TLSInfo) (net.Listener, error) {
if scheme == "https" {
if info.Empty() {
return nil, fmt.Errorf("cannot listen on TLS for %s: KeyFile and CertFile are not presented", scheme+"://"+addr)
return nil, fmt.Errorf("cannot listen on TLS for given listener: KeyFile and CertFile are not presented")
}
cfg, err := info.ServerConfig()
if err != nil {
@@ -53,13 +55,13 @@ func (kln *keepaliveListener) Accept() (net.Conn, error) {
if err != nil {
return nil, err
}
tcpc := c.(*net.TCPConn)
kac := c.(keepAliveConn)
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
// default on linux: 30 + 8 * 30
// default on osx: 30 + 8 * 75
tcpc.SetKeepAlive(true)
tcpc.SetKeepAlivePeriod(30 * time.Second)
return tcpc, nil
kac.SetKeepAlive(true)
kac.SetKeepAlivePeriod(30 * time.Second)
return c, nil
}
// A tlsKeepaliveListener implements a network listener (net.Listener) for TLS connections.
@@ -75,12 +77,12 @@ func (l *tlsKeepaliveListener) Accept() (c net.Conn, err error) {
if err != nil {
return
}
tcpc := c.(*net.TCPConn)
kac := c.(keepAliveConn)
// detection time: tcp_keepalive_time + tcp_keepalive_probes + tcp_keepalive_intvl
// default on linux: 30 + 8 * 30
// default on osx: 30 + 8 * 75
tcpc.SetKeepAlive(true)
tcpc.SetKeepAlivePeriod(30 * time.Second)
kac.SetKeepAlive(true)
kac.SetKeepAlivePeriod(30 * time.Second)
c = tls.Server(c, l.config)
return
}
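
With this signature change the caller owns the net.Listener and passes it in, rather than an address string. A sketch of the new calling convention, assuming this file is etcd's pkg/transport package and using a placeholder address:

```go
package main

import (
	"fmt"
	"log"
	"net"
	"net/http"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	// The caller now creates the raw listener itself...
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		log.Fatal(err)
	}
	// ...and wraps it; with an empty TLSInfo this only enables TCP keepalives.
	kl, err := transport.NewKeepAliveListener(l, "http", transport.TLSInfo{})
	if err != nil {
		log.Fatal(err)
	}
	log.Fatal(http.Serve(kl, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "ok")
	})))
}
```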

View File

@@ -26,7 +26,13 @@ import (
)
func NewListener(addr string, scheme string, info TLSInfo) (net.Listener, error) {
l, err := net.Listen("tcp", addr)
nettype := "tcp"
if scheme == "unix" {
// unix sockets via unix://laddr
nettype = scheme
}
l, err := net.Listen(nettype, addr)
if err != nil {
return nil, err
}
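
The companion NewListener change accepts a unix socket path when the scheme is "unix". A brief sketch under the same package assumption, with a placeholder socket path:

```go
package main

import (
	"log"

	"github.com/coreos/etcd/pkg/transport"
)

func main() {
	// With scheme "unix" the address is treated as a unix domain socket path.
	l, err := transport.NewListener("/tmp/example.sock", "unix", transport.TLSInfo{})
	if err != nil {
		log.Fatal(err)
	}
	defer l.Close()
	// ... accept connections on l ...
}
```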

View File

@@ -41,7 +41,7 @@ func (f *Field) IsExported() bool {
return f.field.PkgPath == ""
}
// IsZero returns true if the given field is not initalized (has a zero value).
// IsZero returns true if the given field is not initialized (has a zero value).
// It panics if the field is not exported.
func (f *Field) IsZero() bool {
zero := reflect.Zero(f.value.Type()).Interface()
@@ -61,7 +61,7 @@ func (f *Field) Kind() reflect.Kind {
}
// Set sets the field to given value v. It retuns an error if the field is not
// settable (not addresable or not exported) or if the given value's type
// settable (not addressable or not exported) or if the given value's type
// doesn't match the fields type.
func (f *Field) Set(val interface{}) error {
// we can't set unexported fields, so be sure this field is exported

View File

@@ -22,8 +22,20 @@ Package ini provides INI file read and write functionality in Go.
## Installation
To use a tagged revision:
go get gopkg.in/ini.v1
To use with latest changes:
go get github.com/go-ini/ini
### Testing
If you want to test on your machine, please apply `-t` flag:
go get -t gopkg.in/ini.v1
## Getting Started
### Loading from data sources
@@ -155,8 +167,8 @@ To get value with types:
```go
// For boolean values:
// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
// false when value is: 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
// true when value is: 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
// false when value is: 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
v, err = cfg.Section("").Key("BOOL").Bool()
v, err = cfg.Section("").Key("FLOAT64").Float64()
v, err = cfg.Section("").Key("INT").Int()
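// As a rough sketch beyond this excerpt (assuming go-ini's usual Must* helpers),
// a default can be supplied when parsing fails:
// debug := cfg.Section("").Key("BOOL").MustBool(false)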

View File

@@ -15,8 +15,20 @@
## 下载安装
使用一个特定版本:
go get gopkg.in/ini.v1
使用最新版:
go get github.com/go-ini/ini
### 测试安装
如果您想要在自己的机器上运行测试,请使用 `-t` 标记:
go get -t gopkg.in/ini.v1
## 开始使用
### 从数据源加载
@@ -148,8 +160,8 @@ yes := cfg.Section("").HasValue("test value")
```go
// 布尔值的规则:
// true 当值为1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On
// false 当值为0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off
// true 当值为1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On
// false 当值为0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off
v, err = cfg.Section("").Key("BOOL").Bool()
v, err = cfg.Section("").Key("FLOAT64").Float64()
v, err = cfg.Section("").Key("INT").Int()

View File

@@ -16,7 +16,6 @@
package ini
import (
"bufio"
"bytes"
"errors"
"fmt"
@@ -35,7 +34,7 @@ const (
// Maximum allowed depth when recursively substituting variable names.
_DEPTH_VALUES = 99
_VERSION = "1.7.0"
_VERSION = "1.8.6"
)
func Version() string {
@@ -164,14 +163,14 @@ func (k *Key) Validate(fn func(string) string) string {
// parseBool returns the boolean value represented by the string.
//
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, ON, on, On,
// 0, f, F, FALSE, false, False, NO, no, No, OFF, off, Off.
// It accepts 1, t, T, TRUE, true, True, YES, yes, Yes, y, ON, on, On,
// 0, f, F, FALSE, false, False, NO, no, No, n, OFF, off, Off.
// Any other value returns an error.
func parseBool(str string) (value bool, err error) {
switch str {
case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "ON", "on", "On":
case "1", "t", "T", "true", "TRUE", "True", "YES", "yes", "Yes", "y", "ON", "on", "On":
return true, nil
case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "OFF", "off", "Off":
case "0", "f", "F", "false", "FALSE", "False", "NO", "no", "No", "n", "OFF", "off", "Off":
return false, nil
}
return false, fmt.Errorf("parsing \"%s\": invalid syntax", str)
@@ -454,7 +453,7 @@ func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time {
return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max)
}
// Strings returns list of string devide by given delimiter.
// Strings returns list of string divided by given delimiter.
func (k *Key) Strings(delim string) []string {
str := k.String()
if len(str) == 0 {
@@ -468,7 +467,7 @@ func (k *Key) Strings(delim string) []string {
return vals
}
// Float64s returns list of float64 devide by given delimiter.
// Float64s returns list of float64 divided by given delimiter.
func (k *Key) Float64s(delim string) []float64 {
strs := k.Strings(delim)
vals := make([]float64, len(strs))
@@ -478,7 +477,7 @@ func (k *Key) Float64s(delim string) []float64 {
return vals
}
// Ints returns list of int devide by given delimiter.
// Ints returns list of int divided by given delimiter.
func (k *Key) Ints(delim string) []int {
strs := k.Strings(delim)
vals := make([]int, len(strs))
@@ -488,7 +487,7 @@ func (k *Key) Ints(delim string) []int {
return vals
}
// Int64s returns list of int64 devide by given delimiter.
// Int64s returns list of int64 divided by given delimiter.
func (k *Key) Int64s(delim string) []int64 {
strs := k.Strings(delim)
vals := make([]int64, len(strs))
@@ -498,18 +497,18 @@ func (k *Key) Int64s(delim string) []int64 {
return vals
}
// Uints returns list of uint devide by given delimiter.
// Uints returns list of uint divided by given delimiter.
func (k *Key) Uints(delim string) []uint {
strs := k.Strings(delim)
vals := make([]uint, len(strs))
for i := range strs {
u, _ := strconv.ParseUint(strs[i], 10, 64)
u, _ := strconv.ParseUint(strs[i], 10, 0)
vals[i] = uint(u)
}
return vals
}
// Uint64s returns list of uint64 devide by given delimiter.
// Uint64s returns list of uint64 divided by given delimiter.
func (k *Key) Uint64s(delim string) []uint64 {
strs := k.Strings(delim)
vals := make([]uint64, len(strs))
@@ -519,7 +518,7 @@ func (k *Key) Uint64s(delim string) []uint64 {
return vals
}
// TimesFormat parses with given format and returns list of time.Time devide by given delimiter.
// TimesFormat parses with given format and returns list of time.Time divided by given delimiter.
func (k *Key) TimesFormat(format, delim string) []time.Time {
strs := k.Strings(delim)
vals := make([]time.Time, len(strs))
@@ -529,14 +528,20 @@ func (k *Key) TimesFormat(format, delim string) []time.Time {
return vals
}
// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter.
// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter.
func (k *Key) Times(delim string) []time.Time {
return k.TimesFormat(time.RFC3339, delim)
}
// SetValue changes key value.
func (k *Key) SetValue(v string) {
if k.s.f.BlockMode {
k.s.f.lock.Lock()
defer k.s.f.lock.Unlock()
}
k.value = v
k.s.keysHash[k.name] = v
}
// _________ __ .__
@@ -619,12 +624,17 @@ func (s *Section) GetKey(name string) (*Key, error) {
}
// HasKey returns true if section contains a key with given name.
func (s *Section) Haskey(name string) bool {
func (s *Section) HasKey(name string) bool {
key, _ := s.GetKey(name)
return key != nil
}
// HasKey returns true if section contains given raw value.
// Haskey is a backwards-compatible name for HasKey.
func (s *Section) Haskey(name string) bool {
return s.HasKey(name)
}
// HasValue returns true if section contains given raw value.
func (s *Section) HasValue(value string) bool {
if s.f.BlockMode {
s.f.lock.RLock()
@@ -867,240 +877,6 @@ func (f *File) DeleteSection(name string) {
}
}
func cutComment(str string) string {
i := strings.Index(str, "#")
if i == -1 {
return str
}
return str[:i]
}
func checkMultipleLines(buf *bufio.Reader, line, val, valQuote string) (string, error) {
isEnd := false
for {
next, err := buf.ReadString('\n')
if err != nil {
if err != io.EOF {
return "", err
}
isEnd = true
}
pos := strings.LastIndex(next, valQuote)
if pos > -1 {
val += next[:pos]
break
}
val += next
if isEnd {
return "", fmt.Errorf("error parsing line: missing closing key quote from '%s' to '%s'", line, next)
}
}
return val, nil
}
func checkContinuationLines(buf *bufio.Reader, val string) (string, bool, error) {
isEnd := false
for {
valLen := len(val)
if valLen == 0 || val[valLen-1] != '\\' {
break
}
val = val[:valLen-1]
next, err := buf.ReadString('\n')
if err != nil {
if err != io.EOF {
return "", isEnd, err
}
isEnd = true
}
next = strings.TrimSpace(next)
if len(next) == 0 {
break
}
val += next
}
return val, isEnd, nil
}
// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) error {
buf := bufio.NewReader(reader)
// Handle BOM-UTF8.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
mask, err := buf.Peek(3)
if err == nil && len(mask) >= 3 && mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
buf.Read(mask)
}
count := 1
comments := ""
isEnd := false
section, err := f.NewSection(DEFAULT_SECTION)
if err != nil {
return err
}
for {
line, err := buf.ReadString('\n')
line = strings.TrimSpace(line)
length := len(line)
// Check error and ignore io.EOF just for a moment.
if err != nil {
if err != io.EOF {
return fmt.Errorf("error reading next line: %v", err)
}
// The last line of file could be an empty line.
if length == 0 {
break
}
isEnd = true
}
// Skip empty lines.
if length == 0 {
continue
}
switch {
case line[0] == '#' || line[0] == ';': // Comments.
if len(comments) == 0 {
comments = line
} else {
comments += LineBreak + line
}
continue
case line[0] == '[' && line[length-1] == ']': // New sction.
section, err = f.NewSection(strings.TrimSpace(line[1 : length-1]))
if err != nil {
return err
}
if len(comments) > 0 {
section.Comment = comments
comments = ""
}
// Reset counter.
count = 1
continue
}
// Other possibilities.
var (
i int
keyQuote string
kname string
valQuote string
val string
)
// Key name surrounded by quotes.
if line[0] == '"' {
if length > 6 && line[0:3] == `"""` {
keyQuote = `"""`
} else {
keyQuote = `"`
}
} else if line[0] == '`' {
keyQuote = "`"
}
if len(keyQuote) > 0 {
qLen := len(keyQuote)
pos := strings.Index(line[qLen:], keyQuote)
if pos == -1 {
return fmt.Errorf("error parsing line: missing closing key quote: %s", line)
}
pos = pos + qLen
i = strings.IndexAny(line[pos:], "=:")
if i < 0 {
return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line)
} else if i == pos {
return fmt.Errorf("error parsing line: key is empty: %s", line)
}
i = i + pos
kname = line[qLen:pos] // Just keep spaces inside quotes.
} else {
i = strings.IndexAny(line, "=:")
if i < 0 {
return fmt.Errorf("error parsing line: key-value delimiter not found: %s", line)
} else if i == 0 {
return fmt.Errorf("error parsing line: key is empty: %s", line)
}
kname = strings.TrimSpace(line[0:i])
}
isAutoIncr := false
// Auto increment.
if kname == "-" {
isAutoIncr = true
kname = "#" + fmt.Sprint(count)
count++
}
lineRight := strings.TrimSpace(line[i+1:])
lineRightLength := len(lineRight)
firstChar := ""
if lineRightLength >= 2 {
firstChar = lineRight[0:1]
}
if firstChar == "`" {
valQuote = "`"
} else if firstChar == `"` {
if lineRightLength >= 3 && lineRight[0:3] == `"""` {
valQuote = `"""`
} else {
valQuote = `"`
}
} else if firstChar == `'` {
valQuote = `'`
}
if len(valQuote) > 0 {
qLen := len(valQuote)
pos := strings.LastIndex(lineRight[qLen:], valQuote)
// For multiple-line value check.
if pos == -1 {
if valQuote == `"` || valQuote == `'` {
return fmt.Errorf("error parsing line: single quote does not allow multiple-line value: %s", line)
}
val = lineRight[qLen:] + "\n"
val, err = checkMultipleLines(buf, line, val, valQuote)
if err != nil {
return err
}
} else {
val = lineRight[qLen : pos+qLen]
}
} else {
val = strings.TrimSpace(cutComment(lineRight))
val, isEnd, err = checkContinuationLines(buf, val)
if err != nil {
return err
}
}
k, err := section.NewKey(kname, val)
if err != nil {
return err
}
k.isAutoIncr = isAutoIncr
if len(comments) > 0 {
k.Comment = comments
comments = ""
}
if isEnd {
break
}
}
return nil
}
func (f *File) reload(s dataSource) error {
r, err := s.ReadCloser()
if err != nil {
@@ -1190,17 +966,18 @@ func (f *File) WriteToIndent(w io.Writer, indent string) (n int64, err error) {
switch {
case key.isAutoIncr:
kname = "-"
case strings.Contains(kname, "`") || strings.Contains(kname, `"`):
kname = `"""` + kname + `"""`
case strings.Contains(kname, `=`) || strings.Contains(kname, `:`):
case strings.ContainsAny(kname, "\"=:"):
kname = "`" + kname + "`"
case strings.Contains(kname, "`"):
kname = `"""` + kname + `"""`
}
val := key.value
// In case key value contains "\n", "`" or "\"".
if strings.Contains(val, "\n") || strings.Contains(val, "`") || strings.Contains(val, `"`) ||
strings.Contains(val, "#") {
// In case key value contains "\n", "`", "\"", "#" or ";".
if strings.ContainsAny(val, "\n`") {
val = `"""` + val + `"""`
} else if strings.ContainsAny(val, "#;") {
val = "`" + val + "`"
}
if _, err = buf.WriteString(kname + equalSign + val + LineBreak); err != nil {
return 0, err

312
Godeps/_workspace/src/github.com/go-ini/ini/parser.go generated vendored Normal file
View File

@@ -0,0 +1,312 @@
// Copyright 2015 Unknwon
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
package ini
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"unicode"
)
type tokenType int
const (
_TOKEN_INVALID tokenType = iota
_TOKEN_COMMENT
_TOKEN_SECTION
_TOKEN_KEY
)
type parser struct {
buf *bufio.Reader
isEOF bool
count int
comment *bytes.Buffer
}
func newParser(r io.Reader) *parser {
return &parser{
buf: bufio.NewReader(r),
count: 1,
comment: &bytes.Buffer{},
}
}
// BOM handles header of BOM-UTF8 format.
// http://en.wikipedia.org/wiki/Byte_order_mark#Representations_of_byte_order_marks_by_encoding
func (p *parser) BOM() error {
mask, err := p.buf.Peek(3)
if err != nil && err != io.EOF {
return err
} else if len(mask) < 3 {
return nil
} else if mask[0] == 239 && mask[1] == 187 && mask[2] == 191 {
p.buf.Read(mask)
}
return nil
}
func (p *parser) readUntil(delim byte) ([]byte, error) {
data, err := p.buf.ReadBytes(delim)
if err != nil {
if err == io.EOF {
p.isEOF = true
} else {
return nil, err
}
}
return data, nil
}
func cleanComment(in []byte) ([]byte, bool) {
i := bytes.IndexAny(in, "#;")
if i == -1 {
return nil, false
}
return in[i:], true
}
func readKeyName(in []byte) (string, int, error) {
line := string(in)
// Check if key name surrounded by quotes.
var keyQuote string
if line[0] == '"' {
if len(line) > 6 && string(line[0:3]) == `"""` {
keyQuote = `"""`
} else {
keyQuote = `"`
}
} else if line[0] == '`' {
keyQuote = "`"
}
// Get out key name
endIdx := -1
if len(keyQuote) > 0 {
startIdx := len(keyQuote)
// FIXME: fail case -> """"""name"""=value
pos := strings.Index(line[startIdx:], keyQuote)
if pos == -1 {
return "", -1, fmt.Errorf("missing closing key quote: %s", line)
}
pos += startIdx
// Find key-value delimiter
i := strings.IndexAny(line[pos+startIdx:], "=:")
if i < 0 {
return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
}
endIdx = pos + i
return strings.TrimSpace(line[startIdx:pos]), endIdx + startIdx + 1, nil
}
endIdx = strings.IndexAny(line, "=:")
if endIdx < 0 {
return "", -1, fmt.Errorf("key-value delimiter not found: %s", line)
}
return strings.TrimSpace(line[0:endIdx]), endIdx + 1, nil
}
func (p *parser) readMultilines(line, val, valQuote string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := string(data)
pos := strings.LastIndex(next, valQuote)
if pos > -1 {
val += next[:pos]
comment, has := cleanComment([]byte(next[pos:]))
if has {
p.comment.Write(bytes.TrimSpace(comment))
}
break
}
val += next
if p.isEOF {
return "", fmt.Errorf("missing closing key quote from '%s' to '%s'", line, next)
}
}
return val, nil
}
func (p *parser) readContinuationLines(val string) (string, error) {
for {
data, err := p.readUntil('\n')
if err != nil {
return "", err
}
next := strings.TrimSpace(string(data))
if len(next) == 0 {
break
}
val += next
if val[len(val)-1] != '\\' {
break
}
val = val[:len(val)-1]
}
return val, nil
}
// hasSurroundedQuote checks that the first and last characters
// are quotes \" or \'.
// It returns false if any other part also contains the same kind of quote.
func hasSurroundedQuote(in string, quote byte) bool {
return len(in) > 2 && in[0] == quote && in[len(in)-1] == quote &&
strings.IndexByte(in[1:], quote) == len(in)-2
}
func (p *parser) readValue(in []byte) (string, error) {
line := strings.TrimLeftFunc(string(in), unicode.IsSpace)
if len(line) == 0 {
return "", nil
}
var valQuote string
if len(line) > 3 && string(line[0:3]) == `"""` {
valQuote = `"""`
} else if line[0] == '`' {
valQuote = "`"
}
if len(valQuote) > 0 {
startIdx := len(valQuote)
pos := strings.LastIndex(line[startIdx:], valQuote)
// Check for multi-line value
if pos == -1 {
return p.readMultilines(line, line[startIdx:], valQuote)
}
return line[startIdx : pos+startIdx], nil
}
// Won't be able to reach here if value only contains whitespace.
line = strings.TrimSpace(line)
// Check continuation lines
if line[len(line)-1] == '\\' {
return p.readContinuationLines(line[:len(line)-1])
}
i := strings.IndexAny(line, "#;")
if i > -1 {
p.comment.WriteString(line[i:])
line = strings.TrimSpace(line[:i])
}
// Trim single quotes
if hasSurroundedQuote(line, '\'') ||
hasSurroundedQuote(line, '"') {
line = line[1 : len(line)-1]
}
return line, nil
}
// parse parses data through an io.Reader.
func (f *File) parse(reader io.Reader) (err error) {
p := newParser(reader)
if err = p.BOM(); err != nil {
return fmt.Errorf("BOM: %v", err)
}
// Ignore error because default section name is never empty string.
section, _ := f.NewSection(DEFAULT_SECTION)
var line []byte
for !p.isEOF {
line, err = p.readUntil('\n')
if err != nil {
return err
}
line = bytes.TrimLeftFunc(line, unicode.IsSpace)
if len(line) == 0 {
continue
}
// Comments
if line[0] == '#' || line[0] == ';' {
// Note: we keep the trailing line break here,
// since it is needed when a second comment line is added;
// it is trimmed once at the end when the comment is assigned.
p.comment.Write(line)
continue
}
// Section
if line[0] == '[' {
// Read to the next ']' (TODO: support quoted strings)
closeIdx := bytes.IndexByte(line, ']')
if closeIdx == -1 {
return fmt.Errorf("unclosed section: %s", line)
}
section, err = f.NewSection(string(line[1:closeIdx]))
if err != nil {
return err
}
comment, has := cleanComment(line[closeIdx+1:])
if has {
p.comment.Write(comment)
}
section.Comment = strings.TrimSpace(p.comment.String())
// Reset auto-counter and comments
p.comment.Reset()
p.count = 1
continue
}
kname, offset, err := readKeyName(line)
if err != nil {
return err
}
// Auto increment.
isAutoIncr := false
if kname == "-" {
isAutoIncr = true
kname = "#" + strconv.Itoa(p.count)
p.count++
}
key, err := section.NewKey(kname, "")
if err != nil {
return err
}
key.isAutoIncr = isAutoIncr
value, err := p.readValue(line[offset:])
if err != nil {
return err
}
key.SetValue(value)
key.Comment = strings.TrimSpace(p.comment.String())
p.comment.Reset()
}
return nil
}

View File

@@ -94,13 +94,14 @@ func setWithProperType(t reflect.Type, key *Key, field reflect.Value, delim stri
field.SetBool(boolVal)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
durationVal, err := key.Duration()
if err == nil {
// Skip zero value
if err == nil && int(durationVal) > 0 {
field.Set(reflect.ValueOf(durationVal))
return nil
}
intVal, err := key.Int64()
if err != nil {
if err != nil || intVal == 0 {
return nil
}
field.SetInt(intVal)
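The change above makes struct mapping skip zero durations and zero integers so they don't clobber defaults. A small sketch of the behaviour, assuming the package's `MapTo` helper and a hypothetical `[timeouts]` section:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"gopkg.in/ini.v1"
)

// Timeouts is a hypothetical config struct.
type Timeouts struct {
	Read  time.Duration
	Write time.Duration
}

func main() {
	cfg, err := ini.Load([]byte("[timeouts]\nWrite = 5s\n"))
	if err != nil {
		log.Fatal(err)
	}

	// Read has no key in the data, so its preset default should survive;
	// Write is parsed as a duration from the data source.
	t := Timeouts{Read: 10 * time.Second}
	if err := cfg.Section("timeouts").MapTo(&t); err != nil {
		log.Fatal(err)
	}
	fmt.Println(t.Read, t.Write) // expected: 10s 5s
}
```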

View File

@@ -1,4 +1,4 @@
[![GoDoc](https://godoc.org/gopkg.in/ldap.v1?status.svg)](https://godoc.org/gopkg.in/ldap.v1)
[![GoDoc](https://godoc.org/gopkg.in/ldap.v2?status.svg)](https://godoc.org/gopkg.in/ldap.v2)
[![Build Status](https://travis-ci.org/go-ldap/ldap.svg)](https://travis-ci.org/go-ldap/ldap)
# Basic LDAP v3 functionality for the GO programming language.

View File

@@ -4,6 +4,7 @@ go:
- 1.2
- 1.3
- 1.4
- 1.5
- tip
before_script:

View File

@@ -15,6 +15,7 @@ Aaron Hopkins <go-sql-driver at die.net>
Arne Hormann <arnehormann at gmail.com>
Carlos Nieto <jose.carlos at menteslibres.net>
Chris Moos <chris at tech9computers.com>
Daniel Nichter <nil at codenode.com>
DisposaBoy <disposaboy at dby.me>
Frederick Mayle <frederickmayle at gmail.com>
Gustavo Kristic <gkristic at gmail.com>
@@ -25,8 +26,10 @@ INADA Naoki <songofacandy at gmail.com>
James Harr <james.harr at gmail.com>
Jian Zhen <zhenjl at gmail.com>
Joshua Prunier <joshua.prunier at gmail.com>
Julien Lefevre <julien.lefevr at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
Kamil Dziedzic <kamil at klecza.pl>
Kevin Malachowski <kevin at chowski.com>
Leonardo YongUk Kim <dalinaum at gmail.com>
Lucas Liu <extrafliu at gmail.com>
Luke Scott <luke at webconnex.com>
@@ -37,7 +40,6 @@ Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com>
Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc>
Julien Lefevre <julien.lefevr at gmail.com>
# Organizations

View File

@@ -12,10 +12,21 @@ Bugfixes:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Fixed handling of queries without columns and rows (#255)
- Fixed a panic when SetKeepAlive() failed (#298)
- Support receiving ERR packet while reading rows (#321)
- Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
- Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
- Actually zero out bytes in handshake response (#378)
- Fixed race condition in registering LOAD DATA INFILE handler (#383)
- Fixed tests with MySQL 5.7.9+ (#380)
- QueryUnescape TLS config names (#397)
- Fixed "broken pipe" error by writing to closed socket (#390)
New Features:
- Support for returning table alias on Columns() (#289)
- Support for returning table alias on Columns() (#289, #359, #382)
- Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
## Version 1.2 (2014-06-03)

View File

@@ -120,18 +120,27 @@ func (mc *mysqlConn) Close() (err error) {
// Makes Close idempotent
if mc.netConn != nil {
err = mc.writeCommandPacket(comQuit)
if err == nil {
err = mc.netConn.Close()
} else {
mc.netConn.Close()
}
mc.cleanup()
return
}
// Closes the network connection and unsets internal variables. Do not call this
// function after successful authentication; call Close instead. This function
// is called before auth or on auth failure because MySQL will have already
// closed the network connection.
func (mc *mysqlConn) cleanup() {
// Makes cleanup idempotent
if mc.netConn != nil {
if err := mc.netConn.Close(); err != nil {
errLog.Print(err)
}
mc.netConn = nil
}
mc.cfg = nil
mc.buf.rd = nil
return
}
func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {

View File

@@ -84,43 +84,23 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
// Reading Handshake Initialization Packet
cipher, err := mc.readInitPacket()
if err != nil {
mc.Close()
mc.cleanup()
return nil, err
}
// Send Client Authentication Packet
if err = mc.writeAuthPacket(cipher); err != nil {
mc.Close()
mc.cleanup()
return nil, err
}
// Read Result Packet
err = mc.readResultOK()
if err != nil {
// Retry with old authentication method, if allowed
if mc.cfg != nil && mc.cfg.allowOldPasswords && err == ErrOldPassword {
if err = mc.writeOldAuthPacket(cipher); err != nil {
mc.Close()
return nil, err
}
if err = mc.readResultOK(); err != nil {
mc.Close()
return nil, err
}
} else if mc.cfg != nil && mc.cfg.allowCleartextPasswords && err == ErrCleartextPassword {
if err = mc.writeClearAuthPacket(); err != nil {
mc.Close()
return nil, err
}
if err = mc.readResultOK(); err != nil {
mc.Close()
return nil, err
}
} else {
mc.Close()
return nil, err
}
// Handle response to auth packet, switch methods if possible
if err = handleAuthResult(mc, cipher); err != nil {
// Authentication failed and MySQL has already closed the connection
// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
// Do not send COM_QUIT, just cleanup and return the error.
mc.cleanup()
return nil, err
}
// Get max allowed packet size
@@ -144,6 +124,38 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
return mc, nil
}
func handleAuthResult(mc *mysqlConn, cipher []byte) error {
// Read Result Packet
err := mc.readResultOK()
if err == nil {
return nil // auth successful
}
if mc.cfg == nil {
return err // auth failed and retry not possible
}
// Retry auth if configured to do so.
if mc.cfg.allowOldPasswords && err == ErrOldPassword {
// Retry with old authentication method. Note: there are edge cases
// where this should work but doesn't; this is currently "wontfix":
// https://github.com/go-sql-driver/mysql/issues/184
if err = mc.writeOldAuthPacket(cipher); err != nil {
return err
}
err = mc.readResultOK()
} else if mc.cfg.allowCleartextPasswords && err == ErrCleartextPassword {
// Retry with clear text password for
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
// http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html
if err = mc.writeClearAuthPacket(); err != nil {
return err
}
err = mc.readResultOK()
}
return err
}
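The fallback paths above are controlled by DSN flags. A hedged sketch of opening a connection with both retries enabled (host and credentials are hypothetical; the parameter names follow this driver's DSN options):

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql"
)

func main() {
	// allowOldPasswords enables the pre-4.1 retry; allowCleartextPasswords
	// enables the cleartext-plugin retry handled in handleAuthResult.
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname" +
		"?allowOldPasswords=true&allowCleartextPasswords=true"

	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err)
	}
}
```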
func init() {
sql.Register("mysql", &MySQLDriver{})
}

View File

@@ -267,6 +267,8 @@ func parseDSNParams(cfg *config, params string) (err error) {
if boolValue {
cfg.tls = &tls.Config{}
}
} else if value, err := url.QueryUnescape(value); err != nil {
return fmt.Errorf("Invalid value for tls config name: %v", err)
} else {
if strings.ToLower(value) == "skip-verify" {
cfg.tls = &tls.Config{InsecureSkipVerify: true}
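The new branch URL-unescapes custom TLS config names before looking them up, so a registered name containing reserved characters can be escaped in the DSN. A sketch under that assumption (CA path, config name, and credentials are hypothetical):

```go
package main

import (
	"crypto/tls"
	"crypto/x509"
	"database/sql"
	"io/ioutil"
	"log"
	"net/url"

	"github.com/go-sql-driver/mysql"
)

func main() {
	// Build a TLS config from a hypothetical CA bundle.
	pem, err := ioutil.ReadFile("/path/to/ca.pem")
	if err != nil {
		log.Fatal(err)
	}
	pool := x509.NewCertPool()
	if !pool.AppendCertsFromPEM(pem) {
		log.Fatal("failed to append CA certificate")
	}

	// Register the config under a name that needs escaping in a DSN.
	if err := mysql.RegisterTLSConfig("custom/ca", &tls.Config{RootCAs: pool}); err != nil {
		log.Fatal(err)
	}

	// The DSN carries the URL-escaped name; the driver unescapes it (#397).
	dsn := "user:password@tcp(127.0.0.1:3306)/dbname?tls=" + url.QueryEscape("custom/ca")
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```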

View File

@@ -61,3 +61,5 @@ Johnny Bergström <johnny@joonix.se>
Adriano Orioli <orioli.adriano@gmail.com>
Claudiu Raveica <claudiu.raveica@gmail.com>
Artem Chernyshev <artem.0xD2@gmail.com>
Ference Fu <fym201@msn.com>
LOVOO <opensource@lovoo.com>

View File

@@ -12,6 +12,12 @@ Project Website: http://gocql.github.io/<br>
API documentation: http://godoc.org/github.com/gocql/gocql<br>
Discussions: https://groups.google.com/forum/#!forum/gocql
Production Stability
--------------------
The way the driver maintains and discovers hosts in the Cassandra cluster changed when support for event-driven discovery using server-side events was added. Many internal parts of the driver were touched and changed as a result; if you would like to go back to the historical node discovery, the `pre-node-events` tag points to a tree that uses the old polling-based discovery.
If you run into bugs related to node discovery using events, please open a ticket.
Supported Versions
------------------
@@ -167,7 +173,7 @@ There are various ways to bind application level data structures to CQL statemen
* Building on top of the gocql driver, [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement.
* Another external project that layers on top of gocql is [cqlc](http://relops.com/cqlc) which generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax.
* [gocassa](https://github.com/hailocab/gocassa) is an external project that layers on top of gocql to provide convenient query building and data binding.
* [gocqltable](https://github.com/elvtechnology/gocqltable) provides an ORM-style convenience layer to make CRUD operations with gocql easier.
* [gocqltable](https://github.com/kristoiv/gocqltable) provides an ORM-style convenience layer to make CRUD operations with gocql easier.
Ecosphere
---------
@@ -179,7 +185,7 @@ The following community maintained tools are known to integrate with gocql:
* [cqlr](https://github.com/relops/cqlr) adds the ability to auto-bind a CQL iterator to a struct or to bind a struct to an INSERT statement.
* [cqlc](http://relops.com/cqlc) which generates gocql compliant code from your Cassandra schema so that you can write type safe CQL statements in Go with a natural query syntax.
* [gocassa](https://github.com/hailocab/gocassa) provides query building, adds data binding, and provides easy-to-use "recipe" tables for common query use-cases.
* [gocqltable](https://github.com/elvtechnology/gocqltable) is a wrapper around gocql that aims to simplify common operations whilst working the library.
* [gocqltable](https://github.com/kristoiv/gocqltable) is a wrapper around gocql that aims to simplify common operations whilst working the library.
Other Projects
--------------

View File

@@ -0,0 +1,167 @@
// +build ccm
package ccm
import (
"bufio"
"bytes"
"errors"
"fmt"
"os/exec"
"strings"
)
func execCmd(args ...string) (*bytes.Buffer, error) {
cmd := exec.Command("ccm", args...)
stdout := &bytes.Buffer{}
cmd.Stdout = stdout
cmd.Stderr = &bytes.Buffer{}
if err := cmd.Run(); err != nil {
return nil, errors.New(cmd.Stderr.(*bytes.Buffer).String())
}
return stdout, nil
}
func AllUp() error {
status, err := Status()
if err != nil {
return err
}
for _, host := range status {
if !host.State.IsUp() {
if err := NodeUp(host.Name); err != nil {
return err
}
}
}
return nil
}
func NodeUp(node string) error {
_, err := execCmd(node, "start", "--wait-for-binary-proto", "--wait-other-notice")
return err
}
func NodeDown(node string) error {
_, err := execCmd(node, "stop")
return err
}
type Host struct {
State NodeState
Addr string
Name string
}
type NodeState int
func (n NodeState) String() string {
if n == NodeStateUp {
return "UP"
} else if n == NodeStateDown {
return "DOWN"
} else {
return fmt.Sprintf("UNKNOWN_STATE_%d", n)
}
}
func (n NodeState) IsUp() bool {
return n == NodeStateUp
}
const (
NodeStateUp NodeState = iota
NodeStateDown
)
func Status() (map[string]Host, error) {
// TODO: parse into struct to manipulate
out, err := execCmd("status", "-v")
if err != nil {
return nil, err
}
const (
stateCluster = iota
stateCommas
stateNode
stateOption
)
nodes := make(map[string]Host)
// didn't really want to write a full state-machine parser
state := stateCluster
sc := bufio.NewScanner(out)
var host Host
for sc.Scan() {
switch state {
case stateCluster:
text := sc.Text()
if !strings.HasPrefix(text, "Cluster:") {
return nil, fmt.Errorf("expected 'Cluster:' got %q", text)
}
state = stateCommas
case stateCommas:
text := sc.Text()
if !strings.HasPrefix(text, "-") {
return nil, fmt.Errorf("expected commas got %q", text)
}
state = stateNode
case stateNode:
// assume nodes start with node
text := sc.Text()
if !strings.HasPrefix(text, "node") {
return nil, fmt.Errorf("expected 'node' got %q", text)
}
line := strings.Split(text, ":")
host.Name = line[0]
nodeState := strings.TrimSpace(line[1])
switch nodeState {
case "UP":
host.State = NodeStateUp
case "DOWN":
host.State = NodeStateDown
default:
return nil, fmt.Errorf("unknown node state from ccm: %q", nodeState)
}
state = stateOption
case stateOption:
text := sc.Text()
if text == "" {
state = stateNode
nodes[host.Name] = host
host = Host{}
continue
}
line := strings.Split(strings.TrimSpace(text), "=")
k, v := line[0], line[1]
if k == "binary" {
// could check errors
// ('127.0.0.1', 9042)
v = v[2:] // (''
if i := strings.IndexByte(v, '\''); i < 0 {
return nil, fmt.Errorf("invalid binary v=%q", v)
} else {
host.Addr = v[:i]
// dont need port
}
}
default:
return nil, fmt.Errorf("unexpected state: %q", state)
}
}
if err := sc.Err(); err != nil {
return nil, fmt.Errorf("unable to parse ccm status: %v", err)
}
return nodes, nil
}
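Because this package just shells out to the `ccm` command-line tool, a test can drive a local cluster with it. A hedged sketch guarded by the same `ccm` build tag (the import path is assumed to be the driver's internal/ccm package, so the file would live inside the gocql tree; the node name is hypothetical):

```go
// +build ccm

package gocql_test

import (
	"testing"

	"github.com/gocql/gocql/internal/ccm"
)

func TestClusterRecovers(t *testing.T) {
	// Make sure every node in the local ccm cluster is running.
	if err := ccm.AllUp(); err != nil {
		t.Fatal(err)
	}

	status, err := ccm.Status()
	if err != nil {
		t.Fatal(err)
	}
	for name, host := range status {
		t.Logf("node %s addr=%s state=%s", name, host.Addr, host.State)
	}

	// Take one node down and bring it back up.
	if err := ccm.NodeDown("node1"); err != nil {
		t.Fatal(err)
	}
	if err := ccm.NodeUp("node1"); err != nil {
		t.Fatal(err)
	}
}
```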

View File

@@ -40,16 +40,6 @@ func initStmtsLRU(max int) {
}
}
// To enable periodic node discovery enable DiscoverHosts in ClusterConfig
type DiscoveryConfig struct {
// If not empty will filter all discoverred hosts to a single Data Centre (default: "")
DcFilter string
// If not empty will filter all discoverred hosts to a single Rack (default: "")
RackFilter string
// The interval to check for new hosts (default: 30s)
Sleep time.Duration
}
// PoolConfig configures the connection pool used by the driver, it defaults to
// using a round robbin host selection policy and a round robbin connection selection
// policy for each host.
@@ -63,7 +53,7 @@ type PoolConfig struct {
ConnSelectionPolicy func() ConnSelectionPolicy
}
func (p PoolConfig) buildPool(session *Session) (*policyConnPool, error) {
func (p PoolConfig) buildPool(session *Session) *policyConnPool {
hostSelection := p.HostSelectionPolicy
if hostSelection == nil {
hostSelection = RoundRobinHostPolicy()
@@ -77,6 +67,27 @@ func (p PoolConfig) buildPool(session *Session) (*policyConnPool, error) {
return newPolicyConnPool(session, hostSelection, connSelection)
}
type DiscoveryConfig struct {
// If not empty will filter all discoverred hosts to a single Data Centre (default: "")
DcFilter string
// If not empty will filter all discoverred hosts to a single Rack (default: "")
RackFilter string
// ignored
Sleep time.Duration
}
func (d DiscoveryConfig) matchFilter(host *HostInfo) bool {
if d.DcFilter != "" && d.DcFilter != host.DataCenter() {
return false
}
if d.RackFilter != "" && d.RackFilter != host.Rack() {
return false
}
return true
}
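A hedged example of wiring these filters into a cluster configuration (contact point, keyspace, and DC/rack names are hypothetical):

```go
package main

import (
	"log"

	"github.com/gocql/gocql"
)

func main() {
	cluster := gocql.NewCluster("192.168.1.1")
	cluster.Keyspace = "example"

	// Hosts discovered via server events are only added when they match
	// both filters; an empty filter field matches everything.
	cluster.Discovery = gocql.DiscoveryConfig{
		DcFilter:   "dc1",
		RackFilter: "rack1",
	}

	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
}
```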
// ClusterConfig is a struct to configure the default cluster implementation
// of gocql. It has a variety of attributes that can be used to modify the
// behavior to fit the most common use cases. Applications that require a
@@ -94,18 +105,18 @@ type ClusterConfig struct {
Authenticator Authenticator // authenticator (default: nil)
RetryPolicy RetryPolicy // Default retry policy to use for queries (default: 0)
SocketKeepalive time.Duration // The keepalive period to use, enabled if > 0 (default: 0)
DiscoverHosts bool // If set, gocql will attempt to automatically discover other members of the Cassandra cluster (default: false)
MaxPreparedStmts int // Sets the maximum cache size for prepared statements globally for gocql (default: 1000)
MaxRoutingKeyInfo int // Sets the maximum cache size for query info about statements for each session (default: 1000)
PageSize int // Default page size to use for created sessions (default: 5000)
SerialConsistency SerialConsistency // Sets the consistency for the serial part of queries, values can be either SERIAL or LOCAL_SERIAL (default: unset)
Discovery DiscoveryConfig
SslOpts *SslOptions
DefaultTimestamp bool // Sends a client side timestamp for all requests which overrides the timestamp at which it arrives at the server. (default: true, only enabled for protocol 3 and above)
// PoolConfig configures the underlying connection pool, allowing the
// configuration of host selection and connection selection policies.
PoolConfig PoolConfig
Discovery DiscoveryConfig
// The maximum amount of time to wait for schema agreement in a cluster after
// receiving a schema change frame. (default: 60s)
MaxWaitSchemaAgreement time.Duration
@@ -124,7 +135,6 @@ func NewCluster(hosts ...string) *ClusterConfig {
Port: 9042,
NumConns: 2,
Consistency: Quorum,
DiscoverHosts: false,
MaxPreparedStmts: defaultMaxPreparedStmts,
MaxRoutingKeyInfo: 1000,
PageSize: 5000,

View File

@@ -141,7 +141,6 @@ type Conn struct {
}
// Connect establishes a connection to a Cassandra node.
// You must also call the Serve method before you can execute any queries.
func Connect(addr string, cfg *ConnConfig, errorHandler ConnErrorHandler, session *Session) (*Conn, error) {
var (
err error
@@ -397,7 +396,12 @@ func (c *Conn) recv() error {
return fmt.Errorf("gocql: frame header stream is beyond call exepected bounds: %d", head.stream)
} else if head.stream == -1 {
// TODO: handle cassandra event frames, we shouldnt get any currently
return c.discardFrame(head)
framer := newFramer(c, c, c.compressor, c.version)
if err := framer.readFrame(&head); err != nil {
return err
}
go c.session.handleEvent(framer)
return nil
} else if head.stream <= 0 {
// reserved stream that we dont use, probably due to a protocol error
// or a bug in Cassandra, this should be an error, parse it and return.
@@ -739,7 +743,10 @@ func (c *Conn) executeQuery(qry *Query) *Iter {
return &Iter{framer: framer}
case *resultSchemaChangeFrame, *schemaChangeKeyspace, *schemaChangeTable, *schemaChangeFunction:
iter := &Iter{framer: framer}
c.awaitSchemaAgreement()
if err := c.awaitSchemaAgreement(); err != nil {
// TODO: should have this behind a flag
log.Println(err)
}
// dont return an error from this, might be a good idea to give a warning
// though. The impact of this returning an error would be that the cluster
// is not consistent with regards to its schema.
@@ -939,11 +946,13 @@ func (c *Conn) awaitSchemaAgreement() (err error) {
localSchemas = "SELECT schema_version FROM system.local WHERE key='local'"
)
var versions map[string]struct{}
endDeadline := time.Now().Add(c.session.cfg.MaxWaitSchemaAgreement)
for time.Now().Before(endDeadline) {
iter := c.query(peerSchemas)
versions := make(map[string]struct{})
versions = make(map[string]struct{})
var schemaVersion string
for iter.Scan(&schemaVersion) {
@@ -977,8 +986,13 @@ func (c *Conn) awaitSchemaAgreement() (err error) {
return
}
schemas := make([]string, 0, len(versions))
for schema := range versions {
schemas = append(schemas, schema)
}
// not exported
return errors.New("gocql: cluster schema versions not consistent")
return fmt.Errorf("gocql: cluster schema versions not consistent: %+v", schemas)
}
type inflightPrepare struct {

View File

@@ -19,7 +19,7 @@ import (
// interface to implement to receive the host information
type SetHosts interface {
SetHosts(hosts []HostInfo)
SetHosts(hosts []*HostInfo)
}
// interface to implement to receive the partitioner value
@@ -62,25 +62,25 @@ type policyConnPool struct {
port int
numConns int
connCfg *ConnConfig
keyspace string
mu sync.RWMutex
hostPolicy HostSelectionPolicy
connPolicy func() ConnSelectionPolicy
hostConnPools map[string]*hostConnPool
endpoints []string
}
func newPolicyConnPool(session *Session, hostPolicy HostSelectionPolicy,
connPolicy func() ConnSelectionPolicy) (*policyConnPool, error) {
func connConfig(session *Session) (*ConnConfig, error) {
cfg := session.cfg
var (
err error
tlsConfig *tls.Config
)
cfg := session.cfg
// TODO(zariel): move tls config setup into session init.
if cfg.SslOpts != nil {
tlsConfig, err = setupTLSConfig(cfg.SslOpts)
if err != nil {
@@ -88,37 +88,38 @@ func newPolicyConnPool(session *Session, hostPolicy HostSelectionPolicy,
}
}
return &ConnConfig{
ProtoVersion: cfg.ProtoVersion,
CQLVersion: cfg.CQLVersion,
Timeout: cfg.Timeout,
Compressor: cfg.Compressor,
Authenticator: cfg.Authenticator,
Keepalive: cfg.SocketKeepalive,
tlsConfig: tlsConfig,
}, nil
}
func newPolicyConnPool(session *Session, hostPolicy HostSelectionPolicy,
connPolicy func() ConnSelectionPolicy) *policyConnPool {
// create the pool
pool := &policyConnPool{
session: session,
port: cfg.Port,
numConns: cfg.NumConns,
connCfg: &ConnConfig{
ProtoVersion: cfg.ProtoVersion,
CQLVersion: cfg.CQLVersion,
Timeout: cfg.Timeout,
Compressor: cfg.Compressor,
Authenticator: cfg.Authenticator,
Keepalive: cfg.SocketKeepalive,
tlsConfig: tlsConfig,
},
keyspace: cfg.Keyspace,
session: session,
port: session.cfg.Port,
numConns: session.cfg.NumConns,
keyspace: session.cfg.Keyspace,
hostPolicy: hostPolicy,
connPolicy: connPolicy,
hostConnPools: map[string]*hostConnPool{},
}
hosts := make([]HostInfo, len(cfg.Hosts))
for i, hostAddr := range cfg.Hosts {
hosts[i].Peer = hostAddr
}
pool.endpoints = make([]string, len(session.cfg.Hosts))
copy(pool.endpoints, session.cfg.Hosts)
pool.SetHosts(hosts)
return pool, nil
return pool
}
func (p *policyConnPool) SetHosts(hosts []HostInfo) {
func (p *policyConnPool) SetHosts(hosts []*HostInfo) {
p.mu.Lock()
defer p.mu.Unlock()
@@ -127,26 +128,40 @@ func (p *policyConnPool) SetHosts(hosts []HostInfo) {
toRemove[addr] = struct{}{}
}
// TODO connect to hosts in parallel, but wait for pools to be
// created before returning
pools := make(chan *hostConnPool)
createCount := 0
for _, host := range hosts {
if !host.IsUp() {
// don't create a connection pool for a down host
continue
}
if _, exists := p.hostConnPools[host.Peer()]; exists {
// still have this host, so don't remove it
delete(toRemove, host.Peer())
continue
}
for i := range hosts {
pool, exists := p.hostConnPools[hosts[i].Peer]
if !exists {
createCount++
go func(host *HostInfo) {
// create a connection pool for the host
pool = newHostConnPool(
pools <- newHostConnPool(
p.session,
hosts[i].Peer,
host,
p.port,
p.numConns,
p.connCfg,
p.keyspace,
p.connPolicy(),
)
p.hostConnPools[hosts[i].Peer] = pool
} else {
// still have this host, so don't remove it
delete(toRemove, hosts[i].Peer)
}(host)
}
// add created pools
for createCount > 0 {
pool := <-pools
createCount--
if pool.Size() > 0 {
// add pool only if there are connections available
p.hostConnPools[pool.host.Peer()] = pool
}
}
@@ -158,7 +173,6 @@ func (p *policyConnPool) SetHosts(hosts []HostInfo) {
// update the policy
p.hostPolicy.SetHosts(hosts)
}
func (p *policyConnPool) SetPartitioner(partitioner string) {
@@ -194,7 +208,7 @@ func (p *policyConnPool) Pick(qry *Query) (SelectedHost, *Conn) {
panic(fmt.Sprintf("policy %T returned no host info: %+v", p.hostPolicy, host))
}
pool, ok := p.hostConnPools[host.Info().Peer]
pool, ok := p.hostConnPools[host.Info().Peer()]
if !ok {
continue
}
@@ -209,7 +223,7 @@ func (p *policyConnPool) Close() {
defer p.mu.Unlock()
// remove the hosts from the policy
p.hostPolicy.SetHosts([]HostInfo{})
p.hostPolicy.SetHosts(nil)
// close the pools
for addr, pool := range p.hostConnPools {
@@ -218,15 +232,69 @@ func (p *policyConnPool) Close() {
}
}
func (p *policyConnPool) addHost(host *HostInfo) {
p.mu.Lock()
defer p.mu.Unlock()
pool, ok := p.hostConnPools[host.Peer()]
if ok {
go pool.fill()
return
}
pool = newHostConnPool(
p.session,
host,
host.Port(),
p.numConns,
p.keyspace,
p.connPolicy(),
)
p.hostConnPools[host.Peer()] = pool
// update policy
// TODO: policy should not have conns, it should have hosts and return a host
// iter which the pool will use to serve conns
p.hostPolicy.AddHost(host)
}
func (p *policyConnPool) removeHost(addr string) {
p.hostPolicy.RemoveHost(addr)
p.mu.Lock()
pool, ok := p.hostConnPools[addr]
if !ok {
p.mu.Unlock()
return
}
delete(p.hostConnPools, addr)
p.mu.Unlock()
pool.Close()
}
func (p *policyConnPool) hostUp(host *HostInfo) {
// TODO(zariel): have a set of up hosts and down hosts, we can internally
// detect down hosts, then try to reconnect to them.
p.addHost(host)
}
func (p *policyConnPool) hostDown(addr string) {
// TODO(zariel): mark host as down so we can try to connect to it later, for
// now just treat it has removed.
p.removeHost(addr)
}
// hostConnPool is a connection pool for a single host.
// Connection selection is based on a provided ConnSelectionPolicy
type hostConnPool struct {
session *Session
host string
host *HostInfo
port int
addr string
size int
connCfg *ConnConfig
keyspace string
policy ConnSelectionPolicy
// protection for conns, closed, filling
@@ -236,16 +304,22 @@ type hostConnPool struct {
filling bool
}
func newHostConnPool(session *Session, host string, port, size int, connCfg *ConnConfig,
func (h *hostConnPool) String() string {
h.mu.RLock()
defer h.mu.RUnlock()
return fmt.Sprintf("[filling=%v closed=%v conns=%v size=%v host=%v]",
h.filling, h.closed, len(h.conns), h.size, h.host)
}
func newHostConnPool(session *Session, host *HostInfo, port, size int,
keyspace string, policy ConnSelectionPolicy) *hostConnPool {
pool := &hostConnPool{
session: session,
host: host,
port: port,
addr: JoinHostPort(host, port),
addr: JoinHostPort(host.Peer(), port),
size: size,
connCfg: connCfg,
keyspace: keyspace,
policy: policy,
conns: make([]*Conn, 0, size),
@@ -267,13 +341,16 @@ func (pool *hostConnPool) Pick(qry *Query) *Conn {
return nil
}
empty := len(pool.conns) == 0
size := len(pool.conns)
pool.mu.RUnlock()
if empty {
// try to fill the empty pool
if size < pool.size {
// try to fill the pool
go pool.fill()
return nil
if size == 0 {
return nil
}
}
return pool.policy.Pick(qry)
@@ -350,35 +427,26 @@ func (pool *hostConnPool) fill() {
if err != nil {
// probably unreachable host
go pool.fillingStopped()
pool.fillingStopped()
// this is called with the connection pool mutex held; this call will
// then recursively try to lock it again. FIXME
go pool.session.handleNodeDown(net.ParseIP(pool.host.Peer()), pool.port)
return
}
// filled one
fillCount--
// connect all remaining connections to this host
pool.connectMany(fillCount)
// connect all connections to this host in sync
for fillCount > 0 {
err := pool.connect()
pool.logConnectErr(err)
// decrement, even on error
fillCount--
}
go pool.fillingStopped()
pool.fillingStopped()
return
}
// fill the rest of the pool asynchronously
go func() {
for fillCount > 0 {
err := pool.connect()
pool.logConnectErr(err)
// decrement, even on error
fillCount--
}
pool.connectMany(fillCount)
// mark the end of filling
pool.fillingStopped()
@@ -407,10 +475,28 @@ func (pool *hostConnPool) fillingStopped() {
pool.mu.Unlock()
}
// connectMany creates new connections concurrently.
func (pool *hostConnPool) connectMany(count int) {
if count == 0 {
return
}
var wg sync.WaitGroup
wg.Add(count)
for i := 0; i < count; i++ {
go func() {
defer wg.Done()
err := pool.connect()
pool.logConnectErr(err)
}()
}
// wait until all connections are done
wg.Wait()
}
// create a new connection to the host and add it to the pool
func (pool *hostConnPool) connect() error {
// try to connect
conn, err := Connect(pool.addr, pool.connCfg, pool, pool.session)
conn, err := pool.session.connect(pool.addr, pool)
if err != nil {
return err
}
@@ -433,7 +519,11 @@ func (pool *hostConnPool) connect() error {
}
pool.conns = append(pool.conns, conn)
pool.policy.SetConns(pool.conns)
conns := make([]*Conn, len(pool.conns))
copy(conns, pool.conns)
pool.policy.SetConns(conns)
return nil
}
@@ -444,6 +534,8 @@ func (pool *hostConnPool) HandleError(conn *Conn, err error, closed bool) {
return
}
// TODO: track the number of errors per host and detect when a host is dead,
// then also have something which can detect when a host comes back.
pool.mu.Lock()
defer pool.mu.Unlock()
@@ -459,7 +551,9 @@ func (pool *hostConnPool) HandleError(conn *Conn, err error, closed bool) {
pool.conns[i], pool.conns = pool.conns[len(pool.conns)-1], pool.conns[:len(pool.conns)-1]
// update the policy
pool.policy.SetConns(pool.conns)
conns := make([]*Conn, len(pool.conns))
copy(conns, pool.conns)
pool.policy.SetConns(conns)
// lost a connection, so fill the pool
go pool.fill()
@@ -475,10 +569,10 @@ func (pool *hostConnPool) drain() {
// empty the pool
conns := pool.conns
pool.conns = pool.conns[:0]
pool.conns = pool.conns[:0:0]
// update the policy
pool.policy.SetConns(pool.conns)
pool.policy.SetConns(nil)
// close the connections
for _, conn := range conns {

View File

@@ -3,22 +3,27 @@ package gocql
import (
"errors"
"fmt"
"log"
"math/rand"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
)
// Ensure that the atomic variable is aligned to a 64bit boundary
// Ensure that the atomic variable is aligned to a 64bit boundary
// so that atomic operations can be applied on 32bit architectures.
type controlConn struct {
connecting uint64
connecting int64
session *Session
conn atomic.Value
conn atomic.Value
retry RetryPolicy
quit chan struct{}
closeWg sync.WaitGroup
quit chan struct{}
}
func createControlConn(session *Session) *controlConn {
@@ -29,12 +34,13 @@ func createControlConn(session *Session) *controlConn {
}
control.conn.Store((*Conn)(nil))
go control.heartBeat()
return control
}
func (c *controlConn) heartBeat() {
defer c.closeWg.Done()
for {
select {
case <-c.quit:
@@ -60,12 +66,84 @@ func (c *controlConn) heartBeat() {
c.reconnect(true)
// time.Sleep(5 * time.Second)
continue
}
}
func (c *controlConn) connect(endpoints []string) error {
// initial connection attempt; try to connect to each endpoint to get an initial
// list of nodes.
// shuffle endpoints so not all drivers will connect to the same initial
// node.
r := rand.New(rand.NewSource(time.Now().UnixNano()))
perm := r.Perm(len(endpoints))
shuffled := make([]string, len(endpoints))
for i, endpoint := range endpoints {
shuffled[perm[i]] = endpoint
}
// store that we are not connected so that reconnect won't happen if we error
atomic.StoreInt64(&c.connecting, -1)
var (
conn *Conn
err error
)
for _, addr := range shuffled {
conn, err = c.session.connect(JoinHostPort(addr, c.session.cfg.Port), c)
if err != nil {
log.Printf("gocql: unable to control conn dial %v: %v\n", addr, err)
continue
}
if err = c.registerEvents(conn); err != nil {
conn.Close()
continue
}
// we should fetch the initial ring here and update initial host data. So that
// when we return from here we have a ring topology ready to go.
break
}
if conn == nil {
// this is fatal, not going to connect a session
return err
}
c.conn.Store(conn)
atomic.StoreInt64(&c.connecting, 0)
c.closeWg.Add(1)
go c.heartBeat()
return nil
}
func (c *controlConn) registerEvents(conn *Conn) error {
framer, err := conn.exec(&writeRegisterFrame{
events: []string{"TOPOLOGY_CHANGE", "STATUS_CHANGE", "STATUS_CHANGE"},
}, nil)
if err != nil {
return err
}
frame, err := framer.parseFrame()
if err != nil {
return err
} else if _, ok := frame.(*readyFrame); !ok {
return fmt.Errorf("unexpected frame in response to register: got %T: %v\n", frame, frame)
}
return nil
}
func (c *controlConn) reconnect(refreshring bool) {
if !atomic.CompareAndSwapUint64(&c.connecting, 0, 1) {
// TODO: simplify this function, use session.ring to get hosts instead of the
// connection pool
if !atomic.CompareAndSwapInt64(&c.connecting, 0, 1) {
return
}
@@ -75,38 +153,65 @@ func (c *controlConn) reconnect(refreshring bool) {
if success {
go func() {
time.Sleep(500 * time.Millisecond)
atomic.StoreUint64(&c.connecting, 0)
atomic.StoreInt64(&c.connecting, 0)
}()
} else {
atomic.StoreUint64(&c.connecting, 0)
atomic.StoreInt64(&c.connecting, 0)
}
}()
addr := c.addr()
oldConn := c.conn.Load().(*Conn)
// TODO: should have our own roundrobbin for hosts so that we can try each
// in succession and guantee that we get a different host each time.
host, conn := c.session.pool.Pick(nil)
if conn == nil {
return
}
newConn, err := Connect(conn.addr, conn.cfg, c, c.session)
if err != nil {
host.Mark(err)
// TODO: add log handler for things like this
return
}
host.Mark(nil)
c.conn.Store(newConn)
success = true
if oldConn != nil {
oldConn.Close()
}
if refreshring && c.session.cfg.DiscoverHosts {
var newConn *Conn
if addr != "" {
// try to connect to the old host
conn, err := c.session.connect(addr, c)
if err != nil {
// host is dead
// TODO: this is replicated in a few places
ip, portStr, _ := net.SplitHostPort(addr)
port, _ := strconv.Atoi(portStr)
c.session.handleNodeDown(net.ParseIP(ip), port)
} else {
newConn = conn
}
}
// TODO: should have our own roundrobbin for hosts so that we can try each
// in succession and guantee that we get a different host each time.
if newConn == nil {
_, conn := c.session.pool.Pick(nil)
if conn == nil {
return
}
if conn == nil {
return
}
var err error
newConn, err = c.session.connect(conn.addr, c)
if err != nil {
// TODO: add log handler for things like this
return
}
}
if err := c.registerEvents(newConn); err != nil {
// TODO: handle this case better
newConn.Close()
log.Printf("gocql: control unable to register events: %v\n", err)
return
}
c.conn.Store(newConn)
success = true
if refreshring {
c.session.hostSource.refreshRing()
}
}
@@ -179,6 +284,46 @@ func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter
return
}
func (c *controlConn) fetchHostInfo(addr net.IP, port int) (*HostInfo, error) {
// TODO(zariel): we should probably move this into host_source or at least
// share code with it.
hostname, _, err := net.SplitHostPort(c.addr())
if err != nil {
return nil, fmt.Errorf("unable to fetch host info, invalid conn addr: %q: %v", c.addr(), err)
}
isLocal := hostname == addr.String()
var fn func(*HostInfo) error
if isLocal {
fn = func(host *HostInfo) error {
// TODO(zariel): should we fetch rpc_address from here?
iter := c.query("SELECT data_center, rack, host_id, tokens, release_version FROM system.local WHERE key='local'")
iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version)
return iter.Close()
}
} else {
fn = func(host *HostInfo) error {
// TODO(zariel): should we fetch rpc_address from here?
iter := c.query("SELECT data_center, rack, host_id, tokens, release_version FROM system.peers WHERE peer=?", addr)
iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version)
return iter.Close()
}
}
host := &HostInfo{
port: port,
}
if err := fn(host); err != nil {
return nil, err
}
host.peer = addr.String()
return host, nil
}
func (c *controlConn) awaitSchemaAgreement() error {
return c.withConn(func(conn *Conn) *Iter {
return &Iter{err: conn.awaitSchemaAgreement()}
@@ -196,6 +341,11 @@ func (c *controlConn) addr() string {
func (c *controlConn) close() {
// TODO: handle more gracefully
close(c.quit)
c.closeWg.Wait()
conn := c.conn.Load().(*Conn)
if conn != nil {
conn.Close()
}
}
var errNoControl = errors.New("gocql: no control connection available")

230
Godeps/_workspace/src/github.com/gocql/gocql/events.go generated vendored Normal file
View File

@@ -0,0 +1,230 @@
package gocql
import (
"log"
"net"
"sync"
"time"
)
type eventDeouncer struct {
name string
timer *time.Timer
mu sync.Mutex
events []frame
callback func([]frame)
quit chan struct{}
}
func newEventDeouncer(name string, eventHandler func([]frame)) *eventDeouncer {
e := &eventDeouncer{
name: name,
quit: make(chan struct{}),
timer: time.NewTimer(eventDebounceTime),
callback: eventHandler,
}
e.timer.Stop()
go e.flusher()
return e
}
func (e *eventDeouncer) stop() {
e.quit <- struct{}{} // sync with flusher
close(e.quit)
}
func (e *eventDeouncer) flusher() {
for {
select {
case <-e.timer.C:
e.mu.Lock()
e.flush()
e.mu.Unlock()
case <-e.quit:
return
}
}
}
const (
eventBufferSize = 1000
eventDebounceTime = 1 * time.Second
)
// flush must be called with mu locked
func (e *eventDeouncer) flush() {
if len(e.events) == 0 {
return
}
// if the flush interval is faster than the callback then we will end up calling
// the callback multiple times, probably a bad idea. In this case we could drop
// frames?
go e.callback(e.events)
e.events = make([]frame, 0, eventBufferSize)
}
func (e *eventDeouncer) debounce(frame frame) {
e.mu.Lock()
e.timer.Reset(eventDebounceTime)
// TODO: probably need a warning to track if this threshold is too low
if len(e.events) < eventBufferSize {
e.events = append(e.events, frame)
} else {
log.Printf("%s: buffer full, dropping event frame: %s", e.name, frame)
}
e.mu.Unlock()
}
func (s *Session) handleNodeEvent(frames []frame) {
type nodeEvent struct {
change string
host net.IP
port int
}
events := make(map[string]*nodeEvent)
for _, frame := range frames {
// TODO: can we be sure the order of events in the buffer is correct?
switch f := frame.(type) {
case *topologyChangeEventFrame:
event, ok := events[f.host.String()]
if !ok {
event = &nodeEvent{change: f.change, host: f.host, port: f.port}
events[f.host.String()] = event
}
event.change = f.change
case *statusChangeEventFrame:
event, ok := events[f.host.String()]
if !ok {
event = &nodeEvent{change: f.change, host: f.host, port: f.port}
events[f.host.String()] = event
}
event.change = f.change
}
}
for _, f := range events {
switch f.change {
case "NEW_NODE":
s.handleNewNode(f.host, f.port, true)
case "REMOVED_NODE":
s.handleRemovedNode(f.host, f.port)
case "MOVED_NODE":
// java-driver handles this, not mentioned in the spec
// TODO(zariel): refresh token map
case "UP":
s.handleNodeUp(f.host, f.port, true)
case "DOWN":
s.handleNodeDown(f.host, f.port)
}
}
}
func (s *Session) handleEvent(framer *framer) {
// TODO(zariel): need to debounce event frames, and possibly also events
defer framerPool.Put(framer)
frame, err := framer.parseFrame()
if err != nil {
// TODO: logger
log.Printf("gocql: unable to parse event frame: %v\n", err)
return
}
// TODO: handle metadata events
switch f := frame.(type) {
case *schemaChangeKeyspace:
case *schemaChangeFunction:
case *schemaChangeTable:
case *topologyChangeEventFrame, *statusChangeEventFrame:
s.nodeEvents.debounce(frame)
default:
log.Printf("gocql: invalid event frame (%T): %v\n", f, f)
}
}
func (s *Session) handleNewNode(host net.IP, port int, waitForBinary bool) {
// TODO(zariel): need to be able to filter discovered nodes
var hostInfo *HostInfo
if s.control != nil {
var err error
hostInfo, err = s.control.fetchHostInfo(host, port)
if err != nil {
log.Printf("gocql: events: unable to fetch host info for %v: %v\n", host, err)
return
}
} else {
hostInfo = &HostInfo{peer: host.String(), port: port, state: NodeUp}
}
// TODO: remove this when the host selection policy is more sophisticated
if !s.cfg.Discovery.matchFilter(hostInfo) {
return
}
if t := hostInfo.Version().nodeUpDelay(); t > 0 && waitForBinary {
time.Sleep(t)
}
// should this handle token moving?
if existing, ok := s.ring.addHostIfMissing(hostInfo); !ok {
existing.update(hostInfo)
hostInfo = existing
}
s.pool.addHost(hostInfo)
if s.control != nil {
s.hostSource.refreshRing()
}
}
func (s *Session) handleRemovedNode(ip net.IP, port int) {
// we remove all nodes but only add ones which pass the filter
addr := ip.String()
s.pool.removeHost(addr)
s.ring.removeHost(addr)
s.hostSource.refreshRing()
}
func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) {
addr := ip.String()
host := s.ring.getHost(addr)
if host != nil {
// TODO: remove this when the host selection policy is more sophisticated
if !s.cfg.Discovery.matchFilter(host) {
return
}
if t := host.Version().nodeUpDelay(); t > 0 && waitForBinary {
time.Sleep(t)
}
host.setState(NodeUp)
s.pool.hostUp(host)
return
}
s.handleNewNode(ip, port, waitForBinary)
}
func (s *Session) handleNodeDown(ip net.IP, port int) {
addr := ip.String()
host := s.ring.getHost(addr)
if host != nil {
host.setState(NodeDown)
}
s.pool.hostDown(addr)
}

View File

@@ -0,0 +1,43 @@
package gocql
// HostFilter interface is used when a host is discovered via server sent events.
type HostFilter interface {
// Called when a new host is discovered, returning true will cause the host
// to be added to the pools.
Accept(host *HostInfo) bool
}
// HostFilterFunc converts a func(host HostInfo) bool into a HostFilter
type HostFilterFunc func(host *HostInfo) bool
func (fn HostFilterFunc) Accept(host *HostInfo) bool {
return fn(host)
}
// AcceptAllFilter will accept all hosts
func AcceptAllFilterfunc() HostFilter {
return HostFilterFunc(func(host *HostInfo) bool {
return true
})
}
// DataCentreHostFilter filters all hosts such that they are in the same data centre
// as the supplied data centre.
func DataCentreHostFilter(dataCentre string) HostFilter {
return HostFilterFunc(func(host *HostInfo) bool {
return host.DataCenter() == dataCentre
})
}
// WhiteListHostFilter filters incoming hosts by checking that their address is
// in the initial hosts whitelist.
func WhiteListHostFilter(hosts ...string) HostFilter {
m := make(map[string]bool, len(hosts))
for _, host := range hosts {
m[host] = true
}
return HostFilterFunc(func(host *HostInfo) bool {
return m[host.Peer()]
})
}
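A small sketch of composing these helpers (data centre name and whitelist addresses are hypothetical); how a filter is ultimately attached to a session is driver-internal and not shown here:

```go
package main

import (
	"fmt"

	"github.com/gocql/gocql"
)

func main() {
	// Accept only hosts from one data centre.
	dcFilter := gocql.DataCentreHostFilter("dc1")

	// Accept only explicitly whitelisted addresses.
	wlFilter := gocql.WhiteListHostFilter("10.0.0.1", "10.0.0.2")

	// Any func(*gocql.HostInfo) bool can act as a filter as well,
	// which makes it easy to combine the two.
	both := gocql.HostFilterFunc(func(h *gocql.HostInfo) bool {
		return dcFilter.Accept(h) && wlFilter.Accept(h)
	})

	fmt.Printf("combined filter: %T\n", both)
}
```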

View File

@@ -228,7 +228,7 @@ const (
)
var (
ErrFrameTooBig = errors.New("frame length is bigger than the maximum alowed")
ErrFrameTooBig = errors.New("frame length is bigger than the maximum allowed")
)
func writeInt(p []byte, n int32) {
@@ -346,7 +346,7 @@ func readHeader(r io.Reader, p []byte) (head frameHeader, err error) {
version := p[0] & protoVersionMask
if version < protoVersion1 || version > protoVersion4 {
err = fmt.Errorf("gocql: invalid version: %x", version)
err = fmt.Errorf("gocql: invalid version: %d", version)
return
}
@@ -446,7 +446,7 @@ func (f *framer) parseFrame() (frame frame, err error) {
}
}
// asumes that the frame body has been read into rbuf
// assumes that the frame body has been read into rbuf
switch f.header.op {
case opError:
frame = f.parseErrorFrame()
@@ -462,6 +462,8 @@ func (f *framer) parseFrame() (frame frame, err error) {
frame = f.parseAuthChallengeFrame()
case opAuthSuccess:
frame = f.parseAuthSuccessFrame()
case opEvent:
frame = f.parseEventFrame()
default:
return nil, NewErrProtocol("unknown op in frame header: %s", f.header.op)
}
@@ -592,7 +594,7 @@ func (f *framer) setLength(length int) {
func (f *framer) finishWrite() error {
if len(f.wbuf) > maxFrameSize {
// huge app frame, lets remove it so it doesnt bloat the heap
// huge app frame, lets remove it so it doesn't bloat the heap
f.wbuf = make([]byte, defaultBufSize)
return ErrFrameTooBig
}
@@ -1064,7 +1066,7 @@ func (f *framer) parseResultSchemaChange() frame {
change := f.readString()
target := f.readString()
// TODO: could just use a seperate type for each target
// TODO: could just use a separate type for each target
switch target {
case "KEYSPACE":
frame := &schemaChangeKeyspace{
@@ -1154,6 +1156,56 @@ func (f *framer) parseAuthChallengeFrame() frame {
}
}
type statusChangeEventFrame struct {
frameHeader
change string
host net.IP
port int
}
func (t statusChangeEventFrame) String() string {
return fmt.Sprintf("[status_change change=%s host=%v port=%v]", t.change, t.host, t.port)
}
// essentially the same as statusChange
type topologyChangeEventFrame struct {
frameHeader
change string
host net.IP
port int
}
func (t topologyChangeEventFrame) String() string {
return fmt.Sprintf("[topology_change change=%s host=%v port=%v]", t.change, t.host, t.port)
}
func (f *framer) parseEventFrame() frame {
eventType := f.readString()
switch eventType {
case "TOPOLOGY_CHANGE":
frame := &topologyChangeEventFrame{frameHeader: *f.header}
frame.change = f.readString()
frame.host, frame.port = f.readInet()
return frame
case "STATUS_CHANGE":
frame := &statusChangeEventFrame{frameHeader: *f.header}
frame.change = f.readString()
frame.host, frame.port = f.readInet()
return frame
case "SCHEMA_CHANGE":
// this should work for all versions
return f.parseResultSchemaChange()
default:
panic(fmt.Errorf("gocql: unknown event type: %q", eventType))
}
}
type writeAuthResponseFrame struct {
data []byte
}
@@ -1408,6 +1460,21 @@ func (f *framer) writeOptionsFrame(stream int, _ *writeOptionsFrame) error {
return f.finishWrite()
}
type writeRegisterFrame struct {
events []string
}
func (w *writeRegisterFrame) writeFrame(framer *framer, streamID int) error {
return framer.writeRegisterFrame(streamID, w)
}
func (f *framer) writeRegisterFrame(streamID int, w *writeRegisterFrame) error {
f.writeHeader(f.flags, opRegister, streamID)
f.writeStringList(w.events)
return f.finishWrite()
}
func (f *framer) readByte() byte {
if len(f.rbuf) < 1 {
panic(fmt.Errorf("not enough bytes in buffer to read byte require 1 got: %d", len(f.rbuf)))

View File

@@ -5,6 +5,7 @@
package gocql
import (
"fmt"
"math/big"
"reflect"
"strings"
@@ -20,7 +21,7 @@ type RowData struct {
func goType(t TypeInfo) reflect.Type {
switch t.Type() {
case TypeVarchar, TypeAscii, TypeInet:
case TypeVarchar, TypeAscii, TypeInet, TypeText:
return reflect.TypeOf(*new(string))
case TypeBigInt, TypeCounter:
return reflect.TypeOf(*new(int64))
@@ -128,16 +129,34 @@ func (r *RowData) rowMap(m map[string]interface{}) {
}
}
// TupleColumnName will return the column name of a tuple value in a column named
// c at index n. It should be used if a specific element within a tuple needs to
// be extracted from a map returned from SliceMap or MapScan.
func TupleColumnName(c string, n int) string {
return fmt.Sprintf("%s[%d]", c, n)
}
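
A hedged usage sketch: assuming an established *gocql.Session and a hypothetical table example with a tuple column named pair, the tuple elements appear in MapScan results under the keys produced by TupleColumnName:

func firstPair(session *gocql.Session) (interface{}, interface{}, error) {
	m := map[string]interface{}{}
	if err := session.Query(`SELECT pair FROM example LIMIT 1`).MapScan(m); err != nil {
		return nil, nil, err
	}
	// tuple elements are keyed "pair[0]", "pair[1]", ... per TupleColumnName
	return m[gocql.TupleColumnName("pair", 0)], m[gocql.TupleColumnName("pair", 1)], nil
}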
func (iter *Iter) RowData() (RowData, error) {
if iter.err != nil {
return RowData{}, iter.err
}
columns := make([]string, 0)
values := make([]interface{}, 0)
for _, column := range iter.Columns() {
val := column.TypeInfo.New()
columns = append(columns, column.Name)
values = append(values, val)
switch c := column.TypeInfo.(type) {
case TupleTypeInfo:
for i, elem := range c.Elems {
columns = append(columns, TupleColumnName(column.Name, i))
values = append(values, elem.New())
}
default:
val := column.TypeInfo.New()
columns = append(columns, column.Name)
values = append(values, val)
}
}
rowData := RowData{
Columns: columns,

View File

@@ -2,36 +2,231 @@ package gocql
import (
"fmt"
"log"
"net"
"strconv"
"strings"
"sync"
"time"
)
type HostInfo struct {
Peer string
DataCenter string
Rack string
HostId string
Tokens []string
type nodeState int32
func (n nodeState) String() string {
if n == NodeUp {
return "UP"
} else if n == NodeDown {
return "DOWN"
}
return fmt.Sprintf("UNKNOWN_%d", n)
}
func (h HostInfo) String() string {
return fmt.Sprintf("[hostinfo peer=%q data_centre=%q rack=%q host_id=%q num_tokens=%d]", h.Peer, h.DataCenter, h.Rack, h.HostId, len(h.Tokens))
const (
NodeUp nodeState = iota
NodeDown
)
type cassVersion struct {
Major, Minor, Patch int
}
func (c *cassVersion) UnmarshalCQL(info TypeInfo, data []byte) error {
version := strings.TrimSuffix(string(data), "-SNAPSHOT")
v := strings.Split(version, ".")
if len(v) < 3 {
return fmt.Errorf("invalid schema_version: %v", string(data))
}
var err error
c.Major, err = strconv.Atoi(v[0])
if err != nil {
return fmt.Errorf("invalid major version %v: %v", v[0], err)
}
c.Minor, err = strconv.Atoi(v[1])
if err != nil {
return fmt.Errorf("invalid minor version %v: %v", v[1], err)
}
c.Patch, err = strconv.Atoi(v[2])
if err != nil {
return fmt.Errorf("invalid patch version %v: %v", v[2], err)
}
return nil
}
func (c cassVersion) String() string {
return fmt.Sprintf("v%d.%d.%d", c.Major, c.Minor, c.Patch)
}
func (c cassVersion) nodeUpDelay() time.Duration {
if c.Major >= 2 && c.Minor >= 2 {
// CASSANDRA-8236
return 0
}
return 10 * time.Second
}
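
A package-internal sketch of the parsing and delay logic above (cassVersion is unexported, so this would live in a gocql test); the version string and expectation are illustrative:

func TestCassVersionNodeUpDelay(t *testing.T) {
	var v cassVersion
	if err := v.UnmarshalCQL(nil, []byte("2.1.8-SNAPSHOT")); err != nil {
		t.Fatal(err)
	}
	// Parsed as {Major: 2, Minor: 1, Patch: 8}; 2.1 predates CASSANDRA-8236,
	// so nodeUpDelay falls back to the conservative 10 second wait.
	if got := v.nodeUpDelay(); got != 10*time.Second {
		t.Fatalf("unexpected delay: %v", got)
	}
}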
type HostInfo struct {
// TODO(zariel): reduce locking maybe, not all values will change, but to ensure
// that we are thread safe use a mutex to access all fields.
mu sync.RWMutex
peer string
port int
dataCenter string
rack string
hostId string
version cassVersion
state nodeState
tokens []string
}
func (h *HostInfo) Equal(host *HostInfo) bool {
h.mu.RLock()
defer h.mu.RUnlock()
host.mu.RLock()
defer host.mu.RUnlock()
return h.peer == host.peer && h.hostId == host.hostId
}
func (h *HostInfo) Peer() string {
h.mu.RLock()
defer h.mu.RUnlock()
return h.peer
}
func (h *HostInfo) setPeer(peer string) *HostInfo {
h.mu.Lock()
defer h.mu.Unlock()
h.peer = peer
return h
}
func (h *HostInfo) DataCenter() string {
h.mu.RLock()
defer h.mu.RUnlock()
return h.dataCenter
}
func (h *HostInfo) setDataCenter(dataCenter string) *HostInfo {
h.mu.Lock()
defer h.mu.Unlock()
h.dataCenter = dataCenter
return h
}
func (h *HostInfo) Rack() string {
h.mu.RLock()
defer h.mu.RUnlock()
return h.rack
}
func (h *HostInfo) setRack(rack string) *HostInfo {
h.mu.Lock()
defer h.mu.Unlock()
h.rack = rack
return h
}
func (h *HostInfo) HostID() string {
h.mu.RLock()
defer h.mu.RUnlock()
return h.hostId
}
func (h *HostInfo) setHostID(hostID string) *HostInfo {
h.mu.Lock()
defer h.mu.Unlock()
h.hostId = hostID
return h
}
func (h *HostInfo) Version() cassVersion {
h.mu.RLock()
defer h.mu.RUnlock()
return h.version
}
func (h *HostInfo) setVersion(major, minor, patch int) *HostInfo {
h.mu.Lock()
defer h.mu.Unlock()
h.version = cassVersion{major, minor, patch}
return h
}
func (h *HostInfo) State() nodeState {
h.mu.RLock()
defer h.mu.RUnlock()
return h.state
}
func (h *HostInfo) setState(state nodeState) *HostInfo {
h.mu.Lock()
defer h.mu.Unlock()
h.state = state
return h
}
func (h *HostInfo) Tokens() []string {
h.mu.RLock()
defer h.mu.RUnlock()
return h.tokens
}
func (h *HostInfo) setTokens(tokens []string) *HostInfo {
h.mu.Lock()
defer h.mu.Unlock()
h.tokens = tokens
return h
}
func (h *HostInfo) Port() int {
h.mu.RLock()
defer h.mu.RUnlock()
return h.port
}
func (h *HostInfo) setPort(port int) *HostInfo {
h.mu.Lock()
defer h.mu.Unlock()
h.port = port
return h
}
func (h *HostInfo) update(from *HostInfo) {
h.mu.Lock()
defer h.mu.Unlock()
h.tokens = from.tokens
h.version = from.version
h.hostId = from.hostId
h.dataCenter = from.dataCenter
}
func (h *HostInfo) IsUp() bool {
return h.State() == NodeUp
}
func (h *HostInfo) String() string {
h.mu.RLock()
defer h.mu.RUnlock()
return fmt.Sprintf("[hostinfo peer=%q port=%d data_centre=%q rack=%q host_id=%q version=%q state=%s num_tokens=%d]", h.peer, h.port, h.dataCenter, h.rack, h.hostId, h.version, h.state, len(h.tokens))
}
// Polls system.peers at a specific interval to find new hosts
type ringDescriber struct {
dcFilter string
rackFilter string
prevHosts []HostInfo
prevPartitioner string
session *Session
closeChan chan bool
dcFilter string
rackFilter string
session *Session
closeChan chan bool
// indicates that we can use system.local to get the connection's remote address
localHasRpcAddr bool
mu sync.Mutex
mu sync.Mutex
prevHosts []*HostInfo
prevPartitioner string
}
func checkSystemLocal(control *controlConn) (bool, error) {
@@ -49,27 +244,27 @@ func checkSystemLocal(control *controlConn) (bool, error) {
return true, nil
}
func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err error) {
func (r *ringDescriber) GetHosts() (hosts []*HostInfo, partitioner string, err error) {
r.mu.Lock()
defer r.mu.Unlock()
// we need conn to be the same because we need to query system.peers and system.local
// on the same node to get the whole cluster
const (
legacyLocalQuery = "SELECT data_center, rack, host_id, tokens, partitioner FROM system.local"
legacyLocalQuery = "SELECT data_center, rack, host_id, tokens, partitioner, release_version FROM system.local"
// only supported in 2.2.0, 2.1.6, 2.0.16
localQuery = "SELECT broadcast_address, data_center, rack, host_id, tokens, partitioner FROM system.local"
localQuery = "SELECT broadcast_address, data_center, rack, host_id, tokens, partitioner, release_version FROM system.local"
)
var localHost HostInfo
localHost := &HostInfo{}
if r.localHasRpcAddr {
iter := r.session.control.query(localQuery)
if iter == nil {
return r.prevHosts, r.prevPartitioner, nil
}
iter.Scan(&localHost.Peer, &localHost.DataCenter, &localHost.Rack,
&localHost.HostId, &localHost.Tokens, &partitioner)
iter.Scan(&localHost.peer, &localHost.dataCenter, &localHost.rack,
&localHost.hostId, &localHost.tokens, &partitioner, &localHost.version)
if err = iter.Close(); err != nil {
return nil, "", err
@@ -80,7 +275,7 @@ func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err er
return r.prevHosts, r.prevPartitioner, nil
}
iter.Scan(&localHost.DataCenter, &localHost.Rack, &localHost.HostId, &localHost.Tokens, &partitioner)
iter.Scan(&localHost.dataCenter, &localHost.rack, &localHost.hostId, &localHost.tokens, &partitioner, &localHost.version)
if err = iter.Close(); err != nil {
return nil, "", err
@@ -93,22 +288,26 @@ func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err er
panic(err)
}
localHost.Peer = addr
localHost.peer = addr
}
hosts = []HostInfo{localHost}
localHost.port = r.session.cfg.Port
iter := r.session.control.query("SELECT rpc_address, data_center, rack, host_id, tokens FROM system.peers")
hosts = []*HostInfo{localHost}
iter := r.session.control.query("SELECT rpc_address, data_center, rack, host_id, tokens, release_version FROM system.peers")
if iter == nil {
return r.prevHosts, r.prevPartitioner, nil
}
host := HostInfo{}
for iter.Scan(&host.Peer, &host.DataCenter, &host.Rack, &host.HostId, &host.Tokens) {
if r.matchFilter(&host) {
host := &HostInfo{port: r.session.cfg.Port}
for iter.Scan(&host.peer, &host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version) {
if r.matchFilter(host) {
hosts = append(hosts, host)
}
host = HostInfo{}
host = &HostInfo{
port: r.session.cfg.Port,
}
}
if err = iter.Close(); err != nil {
@@ -122,45 +321,37 @@ func (r *ringDescriber) GetHosts() (hosts []HostInfo, partitioner string, err er
}
func (r *ringDescriber) matchFilter(host *HostInfo) bool {
if r.dcFilter != "" && r.dcFilter != host.DataCenter {
if r.dcFilter != "" && r.dcFilter != host.DataCenter() {
return false
}
if r.rackFilter != "" && r.rackFilter != host.Rack {
if r.rackFilter != "" && r.rackFilter != host.Rack() {
return false
}
return true
}
func (r *ringDescriber) refreshRing() {
func (r *ringDescriber) refreshRing() error {
// if we have 0 hosts this will return the previous list of hosts to
// attempt to reconnect to the cluster, otherwise we would never find
// downed hosts again. We could possibly have an optimisation to only
// try to add new hosts if GetHosts didn't error and the hosts didn't change.
hosts, partitioner, err := r.GetHosts()
if err != nil {
log.Println("RingDescriber: unable to get ring topology:", err)
return
return err
}
r.session.pool.SetHosts(hosts)
r.session.pool.SetPartitioner(partitioner)
}
func (r *ringDescriber) run(sleep time.Duration) {
if sleep == 0 {
sleep = 30 * time.Second
}
for {
r.refreshRing()
select {
case <-time.After(sleep):
case <-r.closeChan:
return
// TODO: move this to session
// TODO: handle removing hosts here
for _, h := range hosts {
if host, ok := r.session.ring.addHostIfMissing(h); !ok {
r.session.pool.addHost(h)
} else {
host.update(h)
}
}
r.session.pool.SetPartitioner(partitioner)
return nil
}

View File

@@ -31,7 +31,7 @@ function run_tests() {
ccm remove test || true
ccm create test -v binary:$version -n $clusterSize -d --vnodes --jvm_arg="-Xmx256m -XX:NewSize=100m"
ccm create test -v $version -n $clusterSize -d --vnodes --jvm_arg="-Xmx256m -XX:NewSize=100m"
ccm updateconf "${conf[@]}"
if [ "$auth" = true ]
@@ -52,6 +52,9 @@ function run_tests() {
ccm updateconf 'enable_user_defined_functions: true'
fi
sleep 1s
ccm list
ccm start -v
ccm status
ccm node1 nodetool status
@@ -62,7 +65,7 @@ function run_tests() {
go test -v . -timeout 15s -run=TestAuthentication -tags integration -runssl -runauth -proto=$proto -cluster=$(ccm liveset) -clusterSize=$clusterSize -autowait=1000ms
else
go test -timeout 5m -tags integration -v -gocql.timeout=10s -runssl -proto=$proto -rf=3 -cluster=$(ccm liveset) -clusterSize=$clusterSize -autowait=2000ms -compressor=snappy ./...
go test -timeout 10m -tags integration -v -gocql.timeout=10s -runssl -proto=$proto -rf=3 -cluster=$(ccm liveset) -clusterSize=$clusterSize -autowait=2000ms -compressor=snappy ./...
if [ ${PIPESTATUS[0]} -ne 0 ]; then
echo "--- FAIL: ccm status follows:"
@@ -73,6 +76,8 @@ function run_tests() {
echo "--- FAIL: Received a non-zero exit code from the go test execution, please investigate this"
exit 1
fi
go test -timeout 10m -tags ccm -v -gocql.timeout=10s -runssl -proto=$proto -rf=3 -cluster=$(ccm liveset) -clusterSize=$clusterSize -autowait=2000ms -compressor=snappy ./...
fi
ccm remove

View File

@@ -62,7 +62,7 @@ func Marshal(info TypeInfo, value interface{}) ([]byte, error) {
}
switch info.Type() {
case TypeVarchar, TypeAscii, TypeBlob:
case TypeVarchar, TypeAscii, TypeBlob, TypeText:
return marshalVarchar(info, value)
case TypeBoolean:
return marshalBool(info, value)
@@ -115,7 +115,7 @@ func Unmarshal(info TypeInfo, data []byte, value interface{}) error {
}
switch info.Type() {
case TypeVarchar, TypeAscii, TypeBlob:
case TypeVarchar, TypeAscii, TypeBlob, TypeText:
return unmarshalVarchar(info, data, value)
case TypeBoolean:
return unmarshalBool(info, data, value)
@@ -1216,7 +1216,11 @@ func unmarshalInet(info TypeInfo, data []byte, value interface{}) error {
case Unmarshaler:
return v.UnmarshalCQL(info, data)
case *net.IP:
ip := net.IP(data)
if x := len(data); !(x == 4 || x == 16) {
return unmarshalErrorf("cannot unmarshal %s into %T: invalid sized IP: got %d bytes not 4 or 16", info, value, x)
}
buf := copyBytes(data)
ip := net.IP(buf)
if v4 := ip.To4(); v4 != nil {
*v = v4
return nil
@@ -1616,6 +1620,10 @@ type TupleTypeInfo struct {
Elems []TypeInfo
}
func (t TupleTypeInfo) New() interface{} {
return reflect.New(goType(t)).Interface()
}
type UDTField struct {
Name string
Type TypeInfo
@@ -1628,6 +1636,10 @@ type UDTTypeInfo struct {
Elements []UDTField
}
func (u UDTTypeInfo) New() interface{} {
return reflect.New(goType(u)).Interface()
}
func (u UDTTypeInfo) String() string {
buf := &bytes.Buffer{}
@@ -1663,6 +1675,7 @@ const (
TypeDouble Type = 0x0007
TypeFloat Type = 0x0008
TypeInt Type = 0x0009
TypeText Type = 0x000A
TypeTimestamp Type = 0x000B
TypeUUID Type = 0x000C
TypeVarchar Type = 0x000D
@@ -1699,6 +1712,8 @@ func (t Type) String() string {
return "float"
case TypeInt:
return "int"
case TypeText:
return "text"
case TypeTimestamp:
return "timestamp"
case TypeUUID:

View File

@@ -5,6 +5,7 @@
package gocql
import (
"fmt"
"log"
"sync"
"sync/atomic"
@@ -12,6 +13,114 @@ import (
"github.com/hailocab/go-hostpool"
)
// cowHostList implements a copy-on-write host list; its equivalent type is []*HostInfo
type cowHostList struct {
list atomic.Value
mu sync.Mutex
}
func (c *cowHostList) String() string {
return fmt.Sprintf("%+v", c.get())
}
func (c *cowHostList) get() []*HostInfo {
// TODO(zariel): should we replace this with []*HostInfo?
l, ok := c.list.Load().(*[]*HostInfo)
if !ok {
return nil
}
return *l
}
func (c *cowHostList) set(list []*HostInfo) {
c.mu.Lock()
c.list.Store(&list)
c.mu.Unlock()
}
// add will add a host if it is not already in the list
func (c *cowHostList) add(host *HostInfo) bool {
c.mu.Lock()
l := c.get()
if n := len(l); n == 0 {
l = []*HostInfo{host}
} else {
newL := make([]*HostInfo, n+1)
for i := 0; i < n; i++ {
if host.Equal(l[i]) {
c.mu.Unlock()
return false
}
newL[i] = l[i]
}
newL[n] = host
l = newL
}
c.list.Store(&l)
c.mu.Unlock()
return true
}
func (c *cowHostList) update(host *HostInfo) {
c.mu.Lock()
l := c.get()
if len(l) == 0 {
c.mu.Unlock()
return
}
found := false
newL := make([]*HostInfo, len(l))
for i := range l {
if host.Equal(l[i]) {
newL[i] = host
found = true
} else {
newL[i] = l[i]
}
}
if found {
c.list.Store(&newL)
}
c.mu.Unlock()
}
func (c *cowHostList) remove(addr string) bool {
c.mu.Lock()
l := c.get()
size := len(l)
if size == 0 {
c.mu.Unlock()
return false
}
found := false
newL := make([]*HostInfo, 0, size)
for i := 0; i < len(l); i++ {
if l[i].Peer() != addr {
newL = append(newL, l[i])
} else {
found = true
}
}
if !found {
c.mu.Unlock()
return false
}
newL = newL[:size-1 : size-1]
c.list.Store(&newL)
c.mu.Unlock()
return true
}
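
A package-internal sketch of the copy-on-write contract: writers always store a fresh slice, so readers can hold the result of get() without locking (the types are unexported, so this belongs in a gocql test):

func TestCowHostListSketch(t *testing.T) {
	var hosts cowHostList
	hosts.add(&HostInfo{peer: "10.0.0.1", port: 9042})

	snapshot := hosts.get() // safe to iterate while other goroutines add/remove
	for _, h := range snapshot {
		t.Log(h.Peer())
	}

	if !hosts.remove("10.0.0.1") {
		t.Fatal("expected host to be removed")
	}
}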
// RetryableQuery is an interface that represents a query or batch statement that
// exposes the correct functions for the retry policy logic to evaluate correctly.
type RetryableQuery interface {
@@ -50,9 +159,16 @@ func (s *SimpleRetryPolicy) Attempt(q RetryableQuery) bool {
return q.Attempts() <= s.NumRetries
}
type HostStateNotifier interface {
AddHost(host *HostInfo)
RemoveHost(addr string)
// TODO(zariel): add host up/down
}
// HostSelectionPolicy is an interface for selecting
// the most appropriate host to execute a given query.
type HostSelectionPolicy interface {
HostStateNotifier
SetHosts
SetPartitioner
//Pick returns an iteration function over selected hosts
@@ -72,19 +188,17 @@ type NextHost func() SelectedHost
// RoundRobinHostPolicy is a round-robin load balancing policy, where each host
// is tried sequentially for each query.
func RoundRobinHostPolicy() HostSelectionPolicy {
return &roundRobinHostPolicy{hosts: []HostInfo{}}
return &roundRobinHostPolicy{}
}
type roundRobinHostPolicy struct {
hosts []HostInfo
hosts cowHostList
pos uint32
mu sync.RWMutex
}
func (r *roundRobinHostPolicy) SetHosts(hosts []HostInfo) {
r.mu.Lock()
r.hosts = hosts
r.mu.Unlock()
func (r *roundRobinHostPolicy) SetHosts(hosts []*HostInfo) {
r.hosts.set(hosts)
}
func (r *roundRobinHostPolicy) SetPartitioner(partitioner string) {
@@ -96,24 +210,31 @@ func (r *roundRobinHostPolicy) Pick(qry *Query) NextHost {
// to the number of hosts known to this policy
var i int
return func() SelectedHost {
r.mu.RLock()
defer r.mu.RUnlock()
if len(r.hosts) == 0 {
hosts := r.hosts.get()
if len(hosts) == 0 {
return nil
}
// always increment pos to evenly distribute traffic in case of
// failures
pos := atomic.AddUint32(&r.pos, 1)
if i >= len(r.hosts) {
pos := atomic.AddUint32(&r.pos, 1) - 1
if i >= len(hosts) {
return nil
}
host := &r.hosts[(pos)%uint32(len(r.hosts))]
host := hosts[(pos)%uint32(len(hosts))]
i++
return selectedRoundRobinHost{host}
}
}
func (r *roundRobinHostPolicy) AddHost(host *HostInfo) {
r.hosts.add(host)
}
func (r *roundRobinHostPolicy) RemoveHost(addr string) {
r.hosts.remove(addr)
}
// selectedRoundRobinHost is a host returned by the roundRobinHostPolicy and
// implements the SelectedHost interface
type selectedRoundRobinHost struct {
@@ -132,24 +253,25 @@ func (host selectedRoundRobinHost) Mark(err error) {
// selected based on the partition key, so queries are sent to the host which
// owns the partition. Fallback is used when routing information is not available.
func TokenAwareHostPolicy(fallback HostSelectionPolicy) HostSelectionPolicy {
return &tokenAwareHostPolicy{fallback: fallback, hosts: []HostInfo{}}
return &tokenAwareHostPolicy{fallback: fallback}
}
type tokenAwareHostPolicy struct {
hosts cowHostList
mu sync.RWMutex
hosts []HostInfo
partitioner string
tokenRing *tokenRing
fallback HostSelectionPolicy
}
func (t *tokenAwareHostPolicy) SetHosts(hosts []HostInfo) {
func (t *tokenAwareHostPolicy) SetHosts(hosts []*HostInfo) {
t.hosts.set(hosts)
t.mu.Lock()
defer t.mu.Unlock()
// always update the fallback
t.fallback.SetHosts(hosts)
t.hosts = hosts
t.resetTokenRing()
}
@@ -166,6 +288,23 @@ func (t *tokenAwareHostPolicy) SetPartitioner(partitioner string) {
}
}
func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) {
t.hosts.add(host)
t.fallback.AddHost(host)
t.mu.Lock()
t.resetTokenRing()
t.mu.Unlock()
}
func (t *tokenAwareHostPolicy) RemoveHost(addr string) {
t.hosts.remove(addr)
t.mu.Lock()
t.resetTokenRing()
t.mu.Unlock()
}
func (t *tokenAwareHostPolicy) resetTokenRing() {
if t.partitioner == "" {
// partitioner not yet set
@@ -173,7 +312,8 @@ func (t *tokenAwareHostPolicy) resetTokenRing() {
}
// create a new token ring
tokenRing, err := newTokenRing(t.partitioner, t.hosts)
hosts := t.hosts.get()
tokenRing, err := newTokenRing(t.partitioner, hosts)
if err != nil {
log.Printf("Unable to update the token ring due to error: %s", err)
return
@@ -215,6 +355,7 @@ func (t *tokenAwareHostPolicy) Pick(qry *Query) NextHost {
hostReturned bool
fallbackIter NextHost
)
return func() SelectedHost {
if !hostReturned {
hostReturned = true
@@ -266,22 +407,22 @@ func (host selectedTokenAwareHost) Mark(err error) {
// )
//
func HostPoolHostPolicy(hp hostpool.HostPool) HostSelectionPolicy {
return &hostPoolHostPolicy{hostMap: map[string]HostInfo{}, hp: hp}
return &hostPoolHostPolicy{hostMap: map[string]*HostInfo{}, hp: hp}
}
type hostPoolHostPolicy struct {
hp hostpool.HostPool
hostMap map[string]HostInfo
mu sync.RWMutex
hostMap map[string]*HostInfo
}
func (r *hostPoolHostPolicy) SetHosts(hosts []HostInfo) {
func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) {
peers := make([]string, len(hosts))
hostMap := make(map[string]HostInfo, len(hosts))
hostMap := make(map[string]*HostInfo, len(hosts))
for i, host := range hosts {
peers[i] = host.Peer
hostMap[host.Peer] = host
peers[i] = host.Peer()
hostMap[host.Peer()] = host
}
r.mu.Lock()
@@ -290,6 +431,41 @@ func (r *hostPoolHostPolicy) SetHosts(hosts []HostInfo) {
r.mu.Unlock()
}
func (r *hostPoolHostPolicy) AddHost(host *HostInfo) {
r.mu.Lock()
defer r.mu.Unlock()
if _, ok := r.hostMap[host.Peer()]; ok {
return
}
hosts := make([]string, 0, len(r.hostMap)+1)
for addr := range r.hostMap {
hosts = append(hosts, addr)
}
hosts = append(hosts, host.Peer())
r.hp.SetHosts(hosts)
r.hostMap[host.Peer()] = host
}
func (r *hostPoolHostPolicy) RemoveHost(addr string) {
r.mu.Lock()
defer r.mu.Unlock()
if _, ok := r.hostMap[addr]; !ok {
return
}
delete(r.hostMap, addr)
hosts := make([]string, 0, len(r.hostMap))
for addr := range r.hostMap {
hosts = append(hosts, addr)
}
r.hp.SetHosts(hosts)
}
func (r *hostPoolHostPolicy) SetPartitioner(partitioner string) {
// noop
}
@@ -309,7 +485,7 @@ func (r *hostPoolHostPolicy) Pick(qry *Query) NextHost {
return nil
}
return selectedHostPoolHost{&host, hostR}
return selectedHostPoolHost{host, hostR}
}
}

75
Godeps/_workspace/src/github.com/gocql/gocql/ring.go generated vendored Normal file
View File

@@ -0,0 +1,75 @@
package gocql
import (
"sync"
)
type ring struct {
// endpoints are the set of endpoints which the driver will attempt to connect
// to in the case it cannot reach any of its hosts. They are also used to
// bootstrap the initial connection.
endpoints []string
// hosts are the set of all hosts in the cassandra ring that we know of
mu sync.RWMutex
hosts map[string]*HostInfo
// TODO: we should store the ring metadata here also.
}
func (r *ring) getHost(addr string) *HostInfo {
r.mu.RLock()
host := r.hosts[addr]
r.mu.RUnlock()
return host
}
func (r *ring) allHosts() []*HostInfo {
r.mu.RLock()
hosts := make([]*HostInfo, 0, len(r.hosts))
for _, host := range r.hosts {
hosts = append(hosts, host)
}
r.mu.RUnlock()
return hosts
}
func (r *ring) addHost(host *HostInfo) bool {
r.mu.Lock()
if r.hosts == nil {
r.hosts = make(map[string]*HostInfo)
}
addr := host.Peer()
_, ok := r.hosts[addr]
r.hosts[addr] = host
r.mu.Unlock()
return ok
}
func (r *ring) addHostIfMissing(host *HostInfo) (*HostInfo, bool) {
r.mu.Lock()
if r.hosts == nil {
r.hosts = make(map[string]*HostInfo)
}
addr := host.Peer()
existing, ok := r.hosts[addr]
if !ok {
r.hosts[addr] = host
existing = host
}
r.mu.Unlock()
return existing, ok
}
func (r *ring) removeHost(addr string) bool {
r.mu.Lock()
if r.hosts == nil {
r.hosts = make(map[string]*HostInfo)
}
_, ok := r.hosts[addr]
delete(r.hosts, addr)
r.mu.Unlock()
return ok
}
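
A package-internal sketch of how the ring deduplicates discovery events by peer address; the second return value reports whether the host was already known:

func TestRingAddHostIfMissing(t *testing.T) {
	var r ring
	h := &HostInfo{peer: "10.0.0.2", port: 9042}

	if _, ok := r.addHostIfMissing(h); ok {
		t.Fatal("host should not have been known yet")
	}
	// A repeated event for the same peer returns the existing entry, which
	// callers then refresh via update (mirroring refreshRing above).
	if existing, ok := r.addHostIfMissing(&HostInfo{peer: "10.0.0.2", port: 9042}); ok {
		existing.update(h)
	}
	if len(r.allHosts()) != 1 {
		t.Fatal("expected a single deduplicated host")
	}
}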

View File

@@ -10,7 +10,8 @@ import (
"errors"
"fmt"
"io"
"log"
"net"
"strconv"
"strings"
"sync"
"time"
@@ -37,10 +38,22 @@ type Session struct {
schemaDescriber *schemaDescriber
trace Tracer
hostSource *ringDescriber
mu sync.RWMutex
ring ring
connCfg *ConnConfig
mu sync.RWMutex
hostFilter HostFilter
control *controlConn
// event handlers
nodeEvents *eventDeouncer
// ring metadata
hosts []HostInfo
cfg ClusterConfig
closeMu sync.RWMutex
@@ -66,49 +79,75 @@ func NewSession(cfg ClusterConfig) (*Session, error) {
pageSize: cfg.PageSize,
}
pool, err := cfg.PoolConfig.buildPool(s)
connCfg, err := connConfig(s)
if err != nil {
return nil, err
}
s.pool = pool
// See if there are any connections in the pool
if pool.Size() == 0 {
s.Close()
return nil, ErrNoConnectionsStarted
return nil, fmt.Errorf("gocql: unable to create session: %v", err)
}
s.connCfg = connCfg
s.nodeEvents = newEventDeouncer("NodeEvents", s.handleNodeEvent)
s.routingKeyInfoCache.lru = lru.New(cfg.MaxRoutingKeyInfo)
// I think it might be a good idea to simplify this and make it always discover
// hosts, maybe with more filters.
if cfg.DiscoverHosts {
s.hostSource = &ringDescriber{
session: s,
dcFilter: cfg.Discovery.DcFilter,
rackFilter: cfg.Discovery.RackFilter,
closeChan: make(chan bool),
}
s.hostSource = &ringDescriber{
session: s,
closeChan: make(chan bool),
}
s.pool = cfg.PoolConfig.buildPool(s)
var hosts []*HostInfo
if !cfg.disableControlConn {
s.control = createControlConn(s)
s.control.reconnect(false)
// need to setup host source to check for rpc_address in system.local
localHasRPCAddr, err := checkSystemLocal(s.control)
if err != nil {
log.Printf("gocql: unable to verify if system.local table contains rpc_address, falling back to connection address: %v", err)
if err := s.control.connect(cfg.Hosts); err != nil {
s.Close()
return nil, err
}
if cfg.DiscoverHosts {
s.hostSource.localHasRpcAddr = localHasRPCAddr
// need to setup host source to check for broadcast_address in system.local
localHasRPCAddr, _ := checkSystemLocal(s.control)
s.hostSource.localHasRpcAddr = localHasRPCAddr
hosts, _, err = s.hostSource.GetHosts()
if err != nil {
s.Close()
return nil, err
}
} else {
// we don't get host info
hosts = make([]*HostInfo, len(cfg.Hosts))
for i, hostport := range cfg.Hosts {
// TODO: remove duplication
addr, portStr, err := net.SplitHostPort(JoinHostPort(hostport, cfg.Port))
if err != nil {
s.Close()
return nil, fmt.Errorf("NewSession: unable to parse hostport of addr %q: %v", hostport, err)
}
port, err := strconv.Atoi(portStr)
if err != nil {
s.Close()
return nil, fmt.Errorf("NewSession: invalid port for hostport of addr %q: %v", hostport, err)
}
hosts[i] = &HostInfo{peer: addr, port: port, state: NodeUp}
}
}
if cfg.DiscoverHosts {
s.hostSource.refreshRing()
go s.hostSource.run(cfg.Discovery.Sleep)
for _, host := range hosts {
s.handleNodeUp(net.ParseIP(host.Peer()), host.Port(), false)
}
// TODO(zariel): we probably don't need this anymore as we verify that we
// can connect to one of the endpoints supplied by using the control conn.
// See if there are any connections in the pool
if s.pool.Size() == 0 {
s.Close()
return nil, ErrNoConnectionsStarted
}
return s, nil
@@ -206,6 +245,10 @@ func (s *Session) Close() {
if s.control != nil {
s.control.close()
}
if s.nodeEvents != nil {
s.nodeEvents.stop()
}
}
func (s *Session) Closed() bool {
@@ -228,6 +271,7 @@ func (s *Session) executeQuery(qry *Query) *Iter {
for {
host, conn := s.pool.Pick(qry)
qry.attempts++
// Assign the error unavailable to the iterator
if conn == nil {
if qry.rt == nil || !qry.rt.Attempt(qry) {
@@ -241,11 +285,10 @@ func (s *Session) executeQuery(qry *Query) *Iter {
t := time.Now()
iter = conn.executeQuery(qry)
qry.totalLatency += time.Now().Sub(t).Nanoseconds()
qry.attempts++
// Exit the for loop if the query was successful
if iter.err == nil {
host.Mark(iter.err)
host.Mark(nil)
break
}
@@ -495,6 +538,10 @@ func (s *Session) MapExecuteBatchCAS(batch *Batch, dest map[string]interface{})
}
}
func (s *Session) connect(addr string, errorHandler ConnErrorHandler) (*Conn, error) {
return Connect(addr, s.connCfg, errorHandler, s)
}
// Query represents a CQL statement that can be executed.
type Query struct {
stmt string
@@ -707,7 +754,7 @@ func (q *Query) PageState(state []byte) *Query {
// Exec executes the query without returning any rows.
func (q *Query) Exec() error {
iter := q.Iter()
return iter.err
return iter.Close()
}
func isUseStatement(stmt string) bool {
@@ -798,11 +845,17 @@ type Iter struct {
rows [][][]byte
meta resultMetadata
next *nextIter
host *HostInfo
framer *framer
once sync.Once
}
// Host returns the host which the query was sent to.
func (iter *Iter) Host() *HostInfo {
return iter.host
}
// Columns returns the name and type of the selected columns.
func (iter *Iter) Columns() []ColumnInfo {
return iter.meta.columns
@@ -834,7 +887,7 @@ func (iter *Iter) Scan(dest ...interface{}) bool {
// currently we only support scanning into an expanded tuple, such that it's the
// same as scanning in more values from a single column
if len(dest) != iter.meta.actualColCount {
iter.err = errors.New("count mismatch")
iter.err = fmt.Errorf("gocql: not enough columns to scan into: have %d want %d", len(dest), iter.meta.actualColCount)
return false
}

View File

@@ -121,7 +121,7 @@ type tokenRing struct {
hosts []*HostInfo
}
func newTokenRing(partitioner string, hosts []HostInfo) (*tokenRing, error) {
func newTokenRing(partitioner string, hosts []*HostInfo) (*tokenRing, error) {
tokenRing := &tokenRing{
tokens: []token{},
hosts: []*HostInfo{},
@@ -137,9 +137,8 @@ func newTokenRing(partitioner string, hosts []HostInfo) (*tokenRing, error) {
return nil, fmt.Errorf("Unsupported partitioner '%s'", partitioner)
}
for i := range hosts {
host := &hosts[i]
for _, strToken := range host.Tokens {
for _, host := range hosts {
for _, strToken := range host.Tokens() {
token := tokenRing.partitioner.ParseString(strToken)
tokenRing.tokens = append(tokenRing.tokens, token)
tokenRing.hosts = append(tokenRing.hosts, host)
@@ -181,7 +180,7 @@ func (t *tokenRing) String() string {
buf.WriteString("]")
buf.WriteString(t.tokens[i].String())
buf.WriteString(":")
buf.WriteString(t.hosts[i].Peer)
buf.WriteString(t.hosts[i].Peer())
}
buf.WriteString("\n}")
return string(buf.Bytes())

View File

@@ -240,3 +240,12 @@ func (u *UUID) UnmarshalJSON(data []byte) error {
return err
}
func (u UUID) MarshalText() ([]byte, error) {
return []byte(u.String()), nil
}
func (u *UUID) UnmarshalText(text []byte) (err error) {
*u, err = ParseUUID(string(text))
return
}
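
A hedged sketch of the new TextMarshaler/TextUnmarshaler round trip, assuming the github.com/gocql/gocql import; any encoder that understands encoding.TextMarshaler (for example encoding/json map keys) can now carry a UUID:

func uuidTextRoundTrip() error {
	u, err := gocql.RandomUUID()
	if err != nil {
		return err
	}
	text, err := u.MarshalText() // e.g. "1f4a9e42-..." as a byte slice
	if err != nil {
		return err
	}
	var out gocql.UUID
	if err := out.UnmarshalText(text); err != nil {
		return err
	}
	fmt.Println(out == u) // true
	return nil
}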

View File

@@ -50,13 +50,18 @@ func (s *ActivityService) ListWatchers(owner, repo string, opt *ListOptions) ([]
// the empty string will fetch watched repos for the authenticated user.
//
// GitHub API Docs: https://developer.github.com/v3/activity/watching/#list-repositories-being-watched
func (s *ActivityService) ListWatched(user string) ([]Repository, *Response, error) {
func (s *ActivityService) ListWatched(user string, opt *ListOptions) ([]Repository, *Response, error) {
var u string
if user != "" {
u = fmt.Sprintf("users/%v/subscriptions", user)
} else {
u = "user/subscriptions"
}
u, err := addOptions(u, opt)
if err != nil {
return nil, nil, err
}
req, err := s.client.NewRequest("GET", u, nil)
if err != nil {
return nil, nil, err

View File

@@ -74,7 +74,7 @@ The GitHub API has good support for conditional requests which will help
prevent you from burning through your rate limit, as well as help speed up your
application. go-github does not handle conditional requests directly, but is
instead designed to work with a caching http.Transport. We recommend using
https://github.com/gregjones/httpcache, which can be used in conjuction with
https://github.com/gregjones/httpcache, which can be used in conjunction with
https://github.com/sourcegraph/apiproxy to provide additional flexibility and
control of caching rules.
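
A minimal sketch of the caching-transport pattern described above, assuming the github.com/google/go-github/github and github.com/gregjones/httpcache imports:

func newCachingGitHubClient() *github.Client {
	// Responses carrying ETag/Last-Modified headers are revalidated with
	// conditional requests instead of counting against the rate limit.
	cachingTransport := httpcache.NewMemoryCacheTransport()
	return github.NewClient(&http.Client{Transport: cachingTransport})
}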

View File

@@ -17,6 +17,7 @@ import (
"reflect"
"strconv"
"strings"
"sync"
"time"
"github.com/google/go-querystring/query"
@@ -31,6 +32,7 @@ const (
headerRateLimit = "X-RateLimit-Limit"
headerRateRemaining = "X-RateLimit-Remaining"
headerRateReset = "X-RateLimit-Reset"
headerOTP = "X-GitHub-OTP"
mediaTypeV3 = "application/vnd.github.v3+json"
defaultMediaType = "application/octet-stream"
@@ -46,6 +48,9 @@ const (
// https://developer.github.com/changes/2015-06-24-api-enhancements-for-working-with-organization-permissions/
mediaTypeOrgPermissionPreview = "application/vnd.github.ironman-preview+json"
mediaTypeOrgPermissionRepoPreview = "application/vnd.github.ironman-preview.repository+json"
// https://developer.github.com/changes/2015-11-11-protected-branches-api/
mediaTypeProtectedBranchesPreview = "application/vnd.github.loki-preview+json"
)
// A Client manages communication with the GitHub API.
@@ -64,11 +69,8 @@ type Client struct {
// User agent used when communicating with the GitHub API.
UserAgent string
// Rate specifies the current rate limit for the client as determined by the
// most recent API call. If the client is used in a multi-user application,
// this rate may not always be up-to-date. Call RateLimits() to check the
// current rate.
Rate Rate
rateMu sync.Mutex
rate Rate
// Services used for talking to different parts of the GitHub API.
Activity *ActivityService
@@ -292,6 +294,17 @@ func (r *Response) populateRate() {
}
}
// Rate specifies the current rate limit for the client as determined by the
// most recent API call. If the client is used in a multi-user application,
// this rate may not always be up-to-date. Call RateLimits() to check the
// current rate.
func (c *Client) Rate() Rate {
c.rateMu.Lock()
rate := c.rate
c.rateMu.Unlock()
return rate
}
// Do sends an API request and returns the API response. The API response is
// JSON decoded and stored in the value pointed to by v, or returned as an
// error if an API error has occurred. If v implements the io.Writer
@@ -307,7 +320,9 @@ func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
response := newResponse(resp)
c.Rate = response.Rate
c.rateMu.Lock()
c.rate = response.Rate
c.rateMu.Unlock()
err = CheckResponse(resp)
if err != nil {
@@ -321,6 +336,9 @@ func (c *Client) Do(req *http.Request, v interface{}) (*Response, error) {
io.Copy(w, resp.Body)
} else {
err = json.NewDecoder(resp.Body).Decode(v)
if err == io.EOF {
err = nil // ignore EOF errors caused by empty response body
}
}
}
return response, err
@@ -343,8 +361,15 @@ func (r *ErrorResponse) Error() string {
r.Response.StatusCode, r.Message, r.Errors)
}
// sanitizeURL redacts the client_id and client_secret tokens from the URL which
// may be exposed to the user, specifically in the ErrorResponse error message.
// TwoFactorAuthError occurs when using HTTP Basic Authentication for a user
// that has two-factor authentication enabled. The request can be reattempted
// by providing a one-time password in the request.
type TwoFactorAuthError ErrorResponse
func (r *TwoFactorAuthError) Error() string { return (*ErrorResponse)(r).Error() }
// sanitizeURL redacts the client_secret parameter from the URL which may be
// exposed to the user, specifically in the ErrorResponse error message.
func sanitizeURL(uri *url.URL) *url.URL {
if uri == nil {
return nil
@@ -397,6 +422,9 @@ func CheckResponse(r *http.Response) error {
if err == nil && data != nil {
json.Unmarshal(data, errorResponse)
}
if r.StatusCode == http.StatusUnauthorized && strings.HasPrefix(r.Header.Get(headerOTP), "required") {
return (*TwoFactorAuthError)(errorResponse)
}
return errorResponse
}
@@ -548,6 +576,43 @@ func (t *UnauthenticatedRateLimitedTransport) transport() http.RoundTripper {
return http.DefaultTransport
}
// BasicAuthTransport is an http.RoundTripper that authenticates all requests
// using HTTP Basic Authentication with the provided username and password. It
// additionally supports users who have two-factor authentication enabled on
// their GitHub account.
type BasicAuthTransport struct {
Username string // GitHub username
Password string // GitHub password
OTP string // one-time password for users with two-factor auth enabled
// Transport is the underlying HTTP transport to use when making requests.
// It will default to http.DefaultTransport if nil.
Transport http.RoundTripper
}
// RoundTrip implements the RoundTripper interface.
func (t *BasicAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
req = cloneRequest(req) // per RoundTrip contract
req.SetBasicAuth(t.Username, t.Password)
if t.OTP != "" {
req.Header.Add(headerOTP, t.OTP)
}
return t.transport().RoundTrip(req)
}
// Client returns an *http.Client that makes requests that are authenticated
// using HTTP Basic Authentication.
func (t *BasicAuthTransport) Client() *http.Client {
return &http.Client{Transport: t}
}
func (t *BasicAuthTransport) transport() http.RoundTripper {
if t.Transport != nil {
return t.Transport
}
return http.DefaultTransport
}
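
A hedged usage sketch of BasicAuthTransport together with the TwoFactorAuthError introduced above; promptForOTP is a hypothetical helper that reads the user's one-time password:

func getAuthenticatedUser() error {
	tp := &github.BasicAuthTransport{
		Username: os.Getenv("GITHUB_USERNAME"),
		Password: os.Getenv("GITHUB_PASSWORD"),
	}
	client := github.NewClient(tp.Client())

	_, _, err := client.Users.Get("")
	if _, ok := err.(*github.TwoFactorAuthError); ok {
		tp.OTP = promptForOTP() // hypothetical: ask the user for their OTP
		_, _, err = client.Users.Get("")
	}
	return err
}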
// cloneRequest returns a clone of the provided *http.Request. The clone is a
// shallow copy of the struct and its Header map.
func cloneRequest(r *http.Request) *http.Request {
@@ -555,9 +620,9 @@ func cloneRequest(r *http.Request) *http.Request {
r2 := new(http.Request)
*r2 = *r
// deep copy of the Header
r2.Header = make(http.Header)
r2.Header = make(http.Header, len(r.Header))
for k, s := range r.Header {
r2.Header[k] = s
r2.Header[k] = append([]string(nil), s...)
}
return r2
}

View File

@@ -65,7 +65,7 @@ type IssueListOptions struct {
Filter string `url:"filter,omitempty"`
// State filters issues based on their state. Possible values are: open,
// closed. Default is "open".
// closed, all. Default is "open".
State string `url:"state,omitempty"`
// Labels filters issues based on their label.
@@ -76,7 +76,7 @@ type IssueListOptions struct {
Sort string `url:"sort,omitempty"`
// Direction in which to sort issues. Possible values are: asc, desc.
// Default is "asc".
// Default is "desc".
Direction string `url:"direction,omitempty"`
// Since filters issues by time.
@@ -148,7 +148,7 @@ type IssueListByRepoOptions struct {
Milestone string `url:"milestone,omitempty"`
// State filters issues based on their state. Possible values are: open,
// closed. Default is "open".
// closed, all. Default is "open".
State string `url:"state,omitempty"`
// Assignee filters issues based on their assignee. Possible values are a
@@ -170,7 +170,7 @@ type IssueListByRepoOptions struct {
Sort string `url:"sort,omitempty"`
// Direction in which to sort issues. Possible values are: asc, desc.
// Default is "asc".
// Default is "desc".
Direction string `url:"direction,omitempty"`
// Since filters issues by time.

View File

@@ -52,7 +52,7 @@ type ListMembersOptions struct {
// 2fa_disabled, all. Default is "all".
Filter string `url:"filter,omitempty"`
// Role filters memebers returned by their role in the organization.
// Role filters members returned by their role in the organization.
// Possible values are:
// all - all members of the organization, regardless of role
// admin - organization owners

View File

@@ -448,8 +448,27 @@ func (s *RepositoriesService) ListTags(owner string, repo string, opt *ListOptio
// Branch represents a repository branch
type Branch struct {
Name *string `json:"name,omitempty"`
Commit *Commit `json:"commit,omitempty"`
Name *string `json:"name,omitempty"`
Commit *Commit `json:"commit,omitempty"`
Protection *Protection `json:"protection,omitempty"`
}
// Protection represents a repository branch's protection
type Protection struct {
Enabled *bool `json:"enabled,omitempty"`
RequiredStatusChecks *RequiredStatusChecks `json:"required_status_checks,omitempty"`
}
// RequiredStatusChecks represents the protection status of an individual branch
type RequiredStatusChecks struct {
// Who required status checks apply to.
// Possible values are:
// off
// non_admins
// everyone
EnforcementLevel *string `json:"enforcement_level,omitempty"`
// The list of status checks which are required
Contexts *[]string `json:"contexts,omitempty"`
}
// ListBranches lists branches for the specified repository.
@@ -486,6 +505,8 @@ func (s *RepositoriesService) GetBranch(owner, repo, branch string) (*Branch, *R
return nil, nil, err
}
req.Header.Set("Accept", mediaTypeProtectedBranchesPreview)
b := new(Branch)
resp, err := s.client.Do(req, b)
if err != nil {

View File

@@ -70,6 +70,7 @@ type Hook struct {
CreatedAt *time.Time `json:"created_at,omitempty"`
UpdatedAt *time.Time `json:"updated_at,omitempty"`
Name *string `json:"name,omitempty"`
URL *string `json:"url,omitempty"`
Events []string `json:"events,omitempty"`
Active *bool `json:"active,omitempty"`
Config map[string]interface{} `json:"config,omitempty"`

View File

@@ -104,6 +104,10 @@ func (p *epsilonGreedyHostPool) Get() HostPoolResponse {
p.Lock()
defer p.Unlock()
host := p.getEpsilonGreedy()
if host == "" {
return nil
}
started := time.Now()
return &epsilonHostPoolResponse{
standardHostPoolResponse: standardHostPoolResponse{host: host, pool: p},
@@ -161,6 +165,7 @@ func (p *epsilonGreedyHostPool) getEpsilonGreedy() string {
if len(possibleHosts) != 0 {
log.Println("Failed to randomly choose a host, Dan loses")
}
return p.getRoundRobin()
}

View File

@@ -44,6 +44,10 @@ type HostPool interface {
markFailed(HostPoolResponse)
ResetAll()
// ReturnUnhealthy when called with true will prevent an unhealthy node from
// being returned and will instead return a nil HostPoolResponse. If using
// this feature, you should check the result of Get for nil
ReturnUnhealthy(v bool)
Hosts() []string
SetHosts([]string)
}
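
A short sketch of the nil-check contract added with ReturnUnhealthy, assuming the github.com/hailocab/go-hostpool import; the host strings are illustrative:

func pickHost() (string, bool) {
	p := hostpool.New([]string{"10.0.0.1:9042", "10.0.0.2:9042"})
	p.ReturnUnhealthy(false)

	resp := p.Get()
	if resp == nil {
		// every host is currently marked down; callers must handle this
		return "", false
	}
	resp.Mark(nil) // report success so the pool keeps the host healthy
	return resp.Host(), true
}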
@@ -52,6 +56,7 @@ type standardHostPool struct {
sync.RWMutex
hosts map[string]*hostEntry
hostList []*hostEntry
returnUnhealthy bool
initialRetryDelay time.Duration
maxRetryInterval time.Duration
nextHostIndex int
@@ -68,6 +73,7 @@ const defaultDecayDuration = time.Duration(5) * time.Minute
// Construct a basic HostPool using the hostnames provided
func New(hosts []string) HostPool {
p := &standardHostPool{
returnUnhealthy: true,
hosts: make(map[string]*hostEntry, len(hosts)),
hostList: make([]*hostEntry, len(hosts)),
initialRetryDelay: time.Duration(30) * time.Second,
@@ -113,6 +119,10 @@ func (p *standardHostPool) Get() HostPoolResponse {
p.Lock()
defer p.Unlock()
host := p.getRoundRobin()
if host == "" {
return nil
}
return &standardHostPoolResponse{host: host, pool: p}
}
@@ -135,6 +145,11 @@ func (p *standardHostPool) getRoundRobin() string {
}
}
// if all hosts are down and returnUnhealthy is false then return no host
if !p.returnUnhealthy {
return ""
}
// all hosts are down. re-add them
p.doResetAll()
p.nextHostIndex = 0
@@ -153,6 +168,12 @@ func (p *standardHostPool) SetHosts(hosts []string) {
p.setHosts(hosts)
}
func (p *standardHostPool) ReturnUnhealthy(v bool) {
p.Lock()
defer p.Unlock()
p.returnUnhealthy = v
}
func (p *standardHostPool) setHosts(hosts []string) {
p.hosts = make(map[string]*hostEntry, len(hosts))
p.hostList = make([]*hostEntry, len(hosts))

View File

@@ -172,11 +172,11 @@ func DefaultConfig() *Config {
}
if !doVerify {
config.HttpClient.Transport = &http.Transport{
TLSClientConfig: &tls.Config{
InsecureSkipVerify: true,
},
transport := cleanhttp.DefaultTransport()
transport.TLSClientConfig = &tls.Config{
InsecureSkipVerify: true,
}
config.HttpClient.Transport = transport
}
}
@@ -261,9 +261,31 @@ func (r *request) setQueryOptions(q *QueryOptions) {
}
}
// durToMsec converts a duration to a millisecond specified string
// durToMsec converts a duration to a millisecond specified string. If the
// user selected a positive value that rounds to 0 ms, then we will use 1 ms
// so they get a short delay, otherwise Consul will translate the 0 ms into
// a huge default delay.
func durToMsec(dur time.Duration) string {
return fmt.Sprintf("%dms", dur/time.Millisecond)
ms := dur / time.Millisecond
if dur > 0 && ms == 0 {
ms = 1
}
return fmt.Sprintf("%dms", ms)
}
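
A package-internal sketch of the rounding rule (durToMsec is unexported, so this would sit in an api package test); the expected strings follow directly from the code above:

func TestDurToMsec(t *testing.T) {
	cases := map[time.Duration]string{
		0:                       "0ms",    // zero stays zero
		250 * time.Microsecond:  "1ms",    // positive but sub-millisecond rounds up
		1500 * time.Millisecond: "1500ms", // everything else truncates to whole ms
	}
	for in, want := range cases {
		if got := durToMsec(in); got != want {
			t.Fatalf("durToMsec(%v) = %q, want %q", in, got, want)
		}
	}
}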
// serverError is a string we look for to detect 500 errors.
const serverError = "Unexpected response code: 500"
// IsServerError returns true for 500 errors from the Consul servers; these are
// usually retryable at a later time.
func IsServerError(err error) bool {
if err == nil {
return false
}
// TODO (slackpad) - Make a real error type here instead of using
// a string check.
return strings.Contains(err.Error(), serverError)
}
// setWriteOptions is used to annotate the request with

View File

@@ -2,7 +2,6 @@ package api
import (
"fmt"
"strings"
"sync"
"time"
)
@@ -29,7 +28,8 @@ const (
// DefaultMonitorRetryTime is how long we wait after a failed monitor check
// of a lock (500 response code). This allows the monitor to ride out brief
// periods of unavailability, subject to the MonitorRetries setting in the
// lock options which is by default set to 0, disabling this feature.
// lock options which is by default set to 0, disabling this feature. This
// affects locks and semaphores.
DefaultMonitorRetryTime = 2 * time.Second
// LockFlagValue is a magic flag we set to indicate a key
@@ -76,6 +76,8 @@ type LockOptions struct {
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
MonitorRetries int // Optional, defaults to 0 which means no retries
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime
LockTryOnce bool // Optional, defaults to false which means try forever
}
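
A hedged sketch of the new one-shot behaviour, given a configured *api.Client and the standard Key field of LockOptions; when LockTryOnce is set and the wait time elapses, Lock returns a nil channel instead of blocking forever. tryAcquireLeadership is an illustrative helper name:

func tryAcquireLeadership(client *api.Client) (bool, error) {
	lock, err := client.LockOpts(&api.LockOptions{
		Key:          "service/leader",
		LockTryOnce:  true,
		LockWaitTime: 10 * time.Second,
	})
	if err != nil {
		return false, err
	}
	held, err := lock.Lock(nil)
	if err != nil {
		return false, err
	}
	return held != nil, nil // nil channel means the one-shot attempt timed out
}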
// LockKey returns a handle to a lock struct which can be used
@@ -108,6 +110,9 @@ func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) {
if opts.MonitorRetryTime == 0 {
opts.MonitorRetryTime = DefaultMonitorRetryTime
}
if opts.LockWaitTime == 0 {
opts.LockWaitTime = DefaultLockWaitTime
}
l := &Lock{
c: c,
opts: opts,
@@ -158,9 +163,11 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Setup the query options
kv := l.c.KV()
qOpts := &QueryOptions{
WaitTime: DefaultLockWaitTime,
WaitTime: l.opts.LockWaitTime,
}
start := time.Now()
attempts := 0
WAIT:
// Check if we should quit
select {
@@ -169,6 +176,17 @@ WAIT:
default:
}
// Handle the one-shot mode.
if l.opts.LockTryOnce && attempts > 0 {
elapsed := time.Now().Sub(start)
if elapsed > qOpts.WaitTime {
return nil, nil
}
qOpts.WaitTime -= elapsed
}
attempts++
// Look for an existing lock, blocking until not taken
pair, meta, err := kv.Get(l.opts.Key, qOpts)
if err != nil {
@@ -343,15 +361,11 @@ WAIT:
RETRY:
pair, meta, err := kv.Get(l.opts.Key, opts)
if err != nil {
// TODO (slackpad) - Make a real error type here instead of using
// a string check.
const serverError = "Unexpected response code: 500"
// If configured we can try to ride out a brief Consul unavailability
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && strings.Contains(err.Error(), serverError) {
if retries > 0 && IsServerError(err) {
time.Sleep(l.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0

View File

@@ -63,12 +63,16 @@ type Semaphore struct {
// SemaphoreOptions is used to parameterize the Semaphore
type SemaphoreOptions struct {
Prefix string // Must be set and have write permissions
Limit int // Must be set, and be positive
Value []byte // Optional, value to associate with the contender entry
Session string // Optional, created if not specified
SessionName string // Optional, defaults to DefaultLockSessionName
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
Prefix string // Must be set and have write permissions
Limit int // Must be set, and be positive
Value []byte // Optional, value to associate with the contender entry
Session string // Optional, created if not specified
SessionName string // Optional, defaults to DefaultLockSessionName
SessionTTL string // Optional, defaults to DefaultLockSessionTTL
MonitorRetries int // Optional, defaults to 0 which means no retries
MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime
SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime
SemaphoreTryOnce bool // Optional, defaults to false which means try forever
}
// semaphoreLock is written under the DefaultSemaphoreKey and
@@ -115,6 +119,12 @@ func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) {
return nil, fmt.Errorf("invalid SessionTTL: %v", err)
}
}
if opts.MonitorRetryTime == 0 {
opts.MonitorRetryTime = DefaultMonitorRetryTime
}
if opts.SemaphoreWaitTime == 0 {
opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime
}
s := &Semaphore{
c: c,
opts: opts,
@@ -172,9 +182,11 @@ func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) {
// Setup the query options
qOpts := &QueryOptions{
WaitTime: DefaultSemaphoreWaitTime,
WaitTime: s.opts.SemaphoreWaitTime,
}
start := time.Now()
attempts := 0
WAIT:
// Check if we should quit
select {
@@ -183,6 +195,17 @@ WAIT:
default:
}
// Handle the one-shot mode.
if s.opts.SemaphoreTryOnce && attempts > 0 {
elapsed := time.Now().Sub(start)
if elapsed > qOpts.WaitTime {
return nil, nil
}
qOpts.WaitTime -= elapsed
}
attempts++
// Read the prefix
pairs, meta, err := kv.List(s.opts.Prefix, qOpts)
if err != nil {
@@ -460,8 +483,20 @@ func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) {
kv := s.c.KV()
opts := &QueryOptions{RequireConsistent: true}
WAIT:
retries := s.opts.MonitorRetries
RETRY:
pairs, meta, err := kv.List(s.opts.Prefix, opts)
if err != nil {
// If configured we can try to ride out a brief Consul unavailability
// by doing retries. Note that we have to attempt the retry in a non-
// blocking fashion so that we have a clean place to reset the retry
// counter if service is restored.
if retries > 0 && IsServerError(err) {
time.Sleep(s.opts.MonitorRetryTime)
retries--
opts.WaitIndex = 0
goto RETRY
}
return
}
lockPair := s.findLock(pairs)

View File

@@ -6,10 +6,18 @@ import (
)
// GenerateUUID is used to generate a random UUID
func GenerateUUID() string {
func GenerateUUID() (string, error) {
buf := make([]byte, 16)
if _, err := rand.Read(buf); err != nil {
panic(fmt.Errorf("failed to read random bytes: %v", err))
return "", fmt.Errorf("failed to read random bytes: %v", err)
}
return FormatUUID(buf)
}
func FormatUUID(buf []byte) (string, error) {
if len(buf) != 16 {
return "", fmt.Errorf("wrong length byte slice (%d)", len(buf))
}
return fmt.Sprintf("%08x-%04x-%04x-%04x-%12x",
@@ -17,5 +25,5 @@ func GenerateUUID() string {
buf[4:6],
buf[6:8],
buf[8:10],
buf[10:16])
buf[10:16]), nil
}
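
A minimal sketch of the updated call pattern, assuming the github.com/hashicorp/go-uuid import: GenerateUUID now reports failure instead of panicking, so callers must handle the error. newRequestID is an illustrative helper name:

func newRequestID() (string, error) {
	id, err := uuid.GenerateUUID()
	if err != nil {
		return "", fmt.Errorf("failed to generate UUID: %v", err)
	}
	return id, nil
}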

View File

@@ -4,7 +4,7 @@ import (
"fmt"
"sync"
"github.com/hashicorp/golang-lru/internal"
"github.com/hashicorp/golang-lru/simplelru"
)
const (
@@ -30,9 +30,9 @@ type TwoQueueCache struct {
size int
recentSize int
recent *internal.LRU
frequent *internal.LRU
recentEvict *internal.LRU
recent *simplelru.LRU
frequent *simplelru.LRU
recentEvict *simplelru.LRU
lock sync.RWMutex
}
@@ -60,15 +60,15 @@ func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCa
evictSize := int(float64(size) * ghostRatio)
// Allocate the LRUs
recent, err := internal.NewLRU(size, nil)
recent, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
frequent, err := internal.NewLRU(size, nil)
frequent, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
recentEvict, err := internal.NewLRU(evictSize, nil)
recentEvict, err := simplelru.NewLRU(evictSize, nil)
if err != nil {
return nil, err
}

View File

@@ -3,7 +3,7 @@ package lru
import (
"sync"
"github.com/hashicorp/golang-lru/internal"
"github.com/hashicorp/golang-lru/simplelru"
)
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
@@ -18,11 +18,11 @@ type ARCCache struct {
size int // Size is the total capacity of the cache
p int // P is the dynamic preference towards T1 or T2
t1 *internal.LRU // T1 is the LRU for recently accessed items
b1 *internal.LRU // B1 is the LRU for evictions from t1
t1 *simplelru.LRU // T1 is the LRU for recently accessed items
b1 *simplelru.LRU // B1 is the LRU for evictions from t1
t2 *internal.LRU // T2 is the LRU for frequently accessed items
b2 *internal.LRU // B2 is the LRU for evictions from t2
t2 *simplelru.LRU // T2 is the LRU for frequently accessed items
b2 *simplelru.LRU // B2 is the LRU for evictions from t2
lock sync.RWMutex
}
@@ -30,19 +30,19 @@ type ARCCache struct {
// NewARC creates an ARC of the given size
func NewARC(size int) (*ARCCache, error) {
// Create the sub LRUs
b1, err := internal.NewLRU(size, nil)
b1, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
b2, err := internal.NewLRU(size, nil)
b2, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
t1, err := internal.NewLRU(size, nil)
t1, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}
t2, err := internal.NewLRU(size, nil)
t2, err := simplelru.NewLRU(size, nil)
if err != nil {
return nil, err
}

View File

@@ -6,12 +6,12 @@ package lru
import (
"sync"
"github.com/hashicorp/golang-lru/internal"
"github.com/hashicorp/golang-lru/simplelru"
)
// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
lru *internal.LRU
lru *simplelru.LRU
lock sync.RWMutex
}
@@ -23,7 +23,7 @@ func New(size int) (*Cache, error) {
// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
lru, err := internal.NewLRU(size, internal.EvictCallback(onEvicted))
lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
if err != nil {
return nil, err
}

View File

@@ -1,4 +1,4 @@
package internal
package simplelru
import (
"container/list"

View File

@@ -1,5 +1,42 @@
package jmespath
import "strconv"
// JMESPath is the representation of a compiled JMES path query. A JMESPath is
// safe for concurrent use by multiple goroutines.
type JMESPath struct {
ast ASTNode
intr *treeInterpreter
}
// Compile parses a JMESPath expression and returns, if successful, a JMESPath
// object that can be used to match against data.
func Compile(expression string) (*JMESPath, error) {
parser := NewParser()
ast, err := parser.Parse(expression)
if err != nil {
return nil, err
}
jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
return jmespath, nil
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled
// JMESPaths.
func MustCompile(expression string) *JMESPath {
jmespath, err := Compile(expression)
if err != nil {
panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
}
return jmespath
}
// Search evaluates a JMESPath expression against input data and returns the result.
func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
return jp.intr.Execute(jp.ast, data)
}
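
A hedged usage sketch of the Compile/Search API above, assuming the github.com/jmespath/go-jmespath import; the JSON document and expression are illustrative:

func waCities() ([]interface{}, error) {
	expr := jmespath.MustCompile("locations[?state == 'WA'].name | sort(@)")

	var data interface{}
	doc := []byte(`{"locations": [{"name": "Seattle", "state": "WA"}, {"name": "Portland", "state": "OR"}]}`)
	if err := json.Unmarshal(doc, &data); err != nil {
		return nil, err
	}

	out, err := expr.Search(data) // yields ["Seattle"]
	if err != nil {
		return nil, err
	}
	return out.([]interface{}), nil
}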
// Search evaluates a JMESPath expression against input data and returns the result.
func Search(expression string, data interface{}) (interface{}, error) {
intr := newInterpreter()

View File

@@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@@ -1,16 +0,0 @@
### Extensions to the "os" package.
## Find the current Executable and ExecutableFolder.
It is sometimes useful to find the path of the executable that is
currently running, for example to upgrade the running binary in place or
to locate resources shipped alongside it. Neither the working directory
nor the os.Args[0] value can be relied on for this: both are arbitrary,
and os.Args[0] can be "faked".
Multi-platform, supporting:
* Linux
* OS X
* Windows
* Plan 9
* BSDs.
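For context on what this removal drops from vendoring, a minimal sketch of the package's two exported functions, assuming the upstream import path github.com/kardianos/osext:

package main

import (
	"fmt"
	"log"

	"github.com/kardianos/osext"
)

func main() {
	// Absolute path of the running binary, resolved by the platform-specific
	// helpers in the files below.
	exe, err := osext.Executable()
	if err != nil {
		log.Fatal(err)
	}
	// Directory containing the binary, without the name or trailing slash.
	dir, err := osext.ExecutableFolder()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(exe, dir)
}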

View File

@@ -1,33 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Extensions to the standard "os" package.
package osext
import "path/filepath"
var cx, ce = executableClean()
func executableClean() (string, error) {
p, err := executable()
return filepath.Clean(p), err
}
// Executable returns an absolute path that can be used to
// re-invoke the current program.
// It may not be valid after the current program exits.
func Executable() (string, error) {
return cx, ce
}
// ExecutableFolder returns the same path as Executable but only the folder
// path, excluding the executable name and any trailing slash.
func ExecutableFolder() (string, error) {
p, err := Executable()
if err != nil {
return "", err
}
return filepath.Dir(p), nil
}

View File

@@ -1,20 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package osext
import (
"os"
"strconv"
"syscall"
)
func executable() (string, error) {
f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text")
if err != nil {
return "", err
}
defer f.Close()
return syscall.Fd2path(int(f.Fd()))
}

View File

@@ -1,36 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux netbsd solaris dragonfly
package osext
import (
"errors"
"fmt"
"os"
"runtime"
"strings"
)
func executable() (string, error) {
switch runtime.GOOS {
case "linux":
const deletedTag = " (deleted)"
execpath, err := os.Readlink("/proc/self/exe")
if err != nil {
return execpath, err
}
execpath = strings.TrimSuffix(execpath, deletedTag)
execpath = strings.TrimPrefix(execpath, deletedTag)
return execpath, nil
case "netbsd":
return os.Readlink("/proc/curproc/exe")
case "dragonfly":
return os.Readlink("/proc/curproc/file")
case "solaris":
return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid()))
}
return "", errors.New("ExecPath not implemented for " + runtime.GOOS)
}

View File

@@ -1,126 +0,0 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build darwin freebsd openbsd
package osext
import (
"os"
"os/exec"
"path/filepath"
"runtime"
"syscall"
"unsafe"
)
var initCwd, initCwdErr = os.Getwd()
func executable() (string, error) {
var mib [4]int32
switch runtime.GOOS {
case "freebsd":
mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1}
case "darwin":
mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1}
case "openbsd":
mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */}
}
n := uintptr(0)
// Get length.
_, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0)
if errNum != 0 {
return "", errNum
}
if n == 0 { // This shouldn't happen.
return "", nil
}
buf := make([]byte, n)
_, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0)
if errNum != 0 {
return "", errNum
}
if n == 0 { // This shouldn't happen.
return "", nil
}
var execPath string
switch runtime.GOOS {
case "openbsd":
// buf now contains **argv, with pointers to each of the C-style
// NULL terminated arguments.
var args []string
argv := uintptr(unsafe.Pointer(&buf[0]))
Loop:
for {
argp := *(**[1 << 20]byte)(unsafe.Pointer(argv))
if argp == nil {
break
}
for i := 0; uintptr(i) < n; i++ {
// we don't want the full arguments list
if string(argp[i]) == " " {
break Loop
}
if argp[i] != 0 {
continue
}
args = append(args, string(argp[:i]))
n -= uintptr(i)
break
}
if n < unsafe.Sizeof(argv) {
break
}
argv += unsafe.Sizeof(argv)
n -= unsafe.Sizeof(argv)
}
execPath = args[0]
// There is no canonical way to get an executable path on
// OpenBSD, so check PATH in case we are called directly
if execPath[0] != '/' && execPath[0] != '.' {
execIsInPath, err := exec.LookPath(execPath)
if err == nil {
execPath = execIsInPath
}
}
default:
for i, v := range buf {
if v == 0 {
buf = buf[:i]
break
}
}
execPath = string(buf)
}
var err error
// execPath will not be empty due to above checks.
// Try to get the absolute path if the execPath is not rooted.
if execPath[0] != '/' {
execPath, err = getAbs(execPath)
if err != nil {
return execPath, err
}
}
// For darwin KERN_PROCARGS may return the path to a symlink rather than the
// actual executable.
if runtime.GOOS == "darwin" {
if execPath, err = filepath.EvalSymlinks(execPath); err != nil {
return execPath, err
}
}
return execPath, nil
}
func getAbs(execPath string) (string, error) {
if initCwdErr != nil {
return execPath, initCwdErr
}
// The execPath may begin with a "../" or a "./" so clean it first.
// Join the two paths, trailing and starting slashes undetermined, so use
// the generic Join function.
return filepath.Join(initCwd, filepath.Clean(execPath)), nil
}

Some files were not shown because too many files have changed in this diff.