Bump deps

This commit is contained in:
Jeff Mitchell
2016-11-02 15:34:30 -04:00
parent 5708bed28d
commit ebf4318dae
211 changed files with 52187 additions and 17105 deletions

View File

@@ -256,6 +256,23 @@ const (
blobCopyStatusFailed = "failed"
)
// lease constants.
const (
leaseHeaderPrefix = "x-ms-lease-"
leaseID = "x-ms-lease-id"
leaseAction = "x-ms-lease-action"
leaseBreakPeriod = "x-ms-lease-break-period"
leaseDuration = "x-ms-lease-duration"
leaseProposedID = "x-ms-proposed-lease-id"
leaseTime = "x-ms-lease-time"
acquireLease = "acquire"
renewLease = "renew"
changeLease = "change"
releaseLease = "release"
breakLease = "break"
)
// BlockListType is used to filter out types of blocks in a Get Blocks List call
// for a block blob.
//
@@ -560,6 +577,134 @@ func (b BlobStorageClient) getBlobRange(container, name, bytesRange string, extr
return resp, err
}
// leaseCommonPut is common PUT code for the various acquire/release/break etc. functions.
func (b BlobStorageClient) leaseCommonPut(container string, name string, headers map[string]string, expectedStatus int) (http.Header, error) {
params := url.Values{"comp": {"lease"}}
uri := b.client.getEndpoint(blobServiceName, pathForBlob(container, name), params)
resp, err := b.client.exec("PUT", uri, headers, nil)
if err != nil {
return nil, err
}
defer resp.body.Close()
if err := checkRespCode(resp.statusCode, []int{expectedStatus}); err != nil {
return nil, err
}
return resp.headers, nil
}
// AcquireLease creates a lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
// Returns the lease ID acquired.
func (b BlobStorageClient) AcquireLease(container string, name string, leaseTimeInSeconds int, proposedLeaseID string) (returnedLeaseID string, err error) {
headers := b.client.getStandardHeaders()
headers[leaseAction] = acquireLease
headers[leaseProposedID] = proposedLeaseID
headers[leaseDuration] = strconv.Itoa(leaseTimeInSeconds)
respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusCreated)
if err != nil {
return "", err
}
returnedLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID))
if returnedLeaseID != "" {
return returnedLeaseID, nil
}
// What should we return in case of HTTP 201 but no lease ID?
// Or can that simply not happen? (brave words)
return "", errors.New("LeaseID not returned")
}
// BreakLease breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
// Returns the timeout remaining in the lease in seconds
func (b BlobStorageClient) BreakLease(container string, name string) (breakTimeout int, err error) {
headers := b.client.getStandardHeaders()
headers[leaseAction] = breakLease
return b.breakLeaseCommon(container, name, headers)
}
// BreakLeaseWithBreakPeriod breaks the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
// breakPeriodInSeconds is used to determine how long until a new lease can be created.
// Returns the timeout remaining in the lease in seconds
func (b BlobStorageClient) BreakLeaseWithBreakPeriod(container string, name string, breakPeriodInSeconds int) (breakTimeout int, err error) {
headers := b.client.getStandardHeaders()
headers[leaseAction] = breakLease
headers[leaseBreakPeriod] = strconv.Itoa(breakPeriodInSeconds)
return b.breakLeaseCommon(container, name, headers)
}
// breakLeaseCommon is common code for both versions of BreakLease (with and without a break period)
func (b BlobStorageClient) breakLeaseCommon(container string, name string, headers map[string]string) (breakTimeout int, err error) {
respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusAccepted)
if err != nil {
return 0, err
}
breakTimeoutStr := respHeaders.Get(http.CanonicalHeaderKey(leaseTime))
if breakTimeoutStr != "" {
breakTimeout, err = strconv.Atoi(breakTimeoutStr)
if err != nil {
return 0, err
}
}
return breakTimeout, nil
}
// ChangeLease changes a lease ID for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
// Returns the new lease ID acquired.
func (b BlobStorageClient) ChangeLease(container string, name string, currentLeaseID string, proposedLeaseID string) (newLeaseID string, err error) {
headers := b.client.getStandardHeaders()
headers[leaseAction] = changeLease
headers[leaseID] = currentLeaseID
headers[leaseProposedID] = proposedLeaseID
respHeaders, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
if err != nil {
return "", err
}
newLeaseID = respHeaders.Get(http.CanonicalHeaderKey(leaseID))
if newLeaseID != "" {
return newLeaseID, nil
}
return "", errors.New("LeaseID not returned")
}
// ReleaseLease releases the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
func (b BlobStorageClient) ReleaseLease(container string, name string, currentLeaseID string) error {
headers := b.client.getStandardHeaders()
headers[leaseAction] = releaseLease
headers[leaseID] = currentLeaseID
_, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
if err != nil {
return err
}
return nil
}
// RenewLease renews the lease for a blob as per https://msdn.microsoft.com/en-us/library/azure/ee691972.aspx
func (b BlobStorageClient) RenewLease(container string, name string, currentLeaseID string) error {
headers := b.client.getStandardHeaders()
headers[leaseAction] = renewLease
headers[leaseID] = currentLeaseID
_, err := b.leaseCommonPut(container, name, headers, http.StatusOK)
if err != nil {
return err
}
return nil
}
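Taken together, the methods above cover the whole blob lease lifecycle. A minimal usage sketch, assuming the storage package's NewBasicClient and GetBlobService helpers; the credentials, container, and blob names are hypothetical:

    cli, err := storage.NewBasicClient("myaccount", "mykey") // hypothetical credentials
    if err != nil {
        log.Fatal(err)
    }
    blobCli := cli.GetBlobService()
    // Azure lease durations must be 15-60 seconds (or -1 for infinite).
    leaseID, err := blobCli.AcquireLease("mycontainer", "myblob", 15, "")
    if err != nil {
        log.Fatal(err)
    }
    if err := blobCli.RenewLease("mycontainer", "myblob", leaseID); err != nil {
        log.Fatal(err)
    }
    if err := blobCli.ReleaseLease("mycontainer", "myblob", leaseID); err != nil {
        log.Fatal(err)
    }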
// GetBlobProperties provides various information about the specified
// blob. See https://msdn.microsoft.com/en-us/library/azure/dd179394.aspx
func (b BlobStorageClient) GetBlobProperties(container, name string) (*BlobProperties, error) {

View File

@@ -30,7 +30,15 @@ const (
Latitude string = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$"
Longitude string = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$"
DNSName string = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62}){1}(\.[a-zA-Z0-9]{1}[a-zA-Z0-9_-]{1,62})*$`
URL string = `^((ftp|https?):\/\/)?(\S+(:\S*)?@)?((([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))|(([a-zA-Z0-9]([a-zA-Z0-9-]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|((www\.)?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))(:(\d{1,5}))?((\/|\?|#)[^\s]*)?$`
IP string = `(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))`
URLSchema string = `((ftp|tcp|udp|wss?|https?):\/\/)`
URLUsername string = `(\S+(:\S*)?@)`
Hostname string = ``
URLPath string = `((\/|\?|#)[^\s]*)`
URLPort string = `(:(\d{1,5}))`
URLIP string = `([1-9]\d?|1\d\d|2[01]\d|22[0-3])(\.(1?\d{1,2}|2[0-4]\d|25[0-5])){2}(?:\.([0-9]\d?|1\d\d|2[0-4]\d|25[0-4]))`
URLSubdomain string = `((www\.)|([a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*))`
URL string = `^` + URLSchema + `?` + URLUsername + `?` + `((` + URLIP + `|(\[` + IP + `\])|(([a-zA-Z0-9]([a-zA-Z0-9-]+)?[a-zA-Z0-9]([-\.][a-zA-Z0-9]+)*)|(` + URLSubdomain + `?))?(([a-zA-Z\x{00a1}-\x{ffff}0-9]+-?-?)*[a-zA-Z\x{00a1}-\x{ffff}0-9]+)(?:\.([a-zA-Z\x{00a1}-\x{ffff}]{1,}))?))` + URLPort + `?` + URLPath + `?$`
SSN string = `^\d{3}[- ]?\d{2}[- ]?\d{4}$`
WinPath string = `^[a-zA-Z]:\\(?:[^\\/:*?"<>|\r\n]+\\)*[^\\/:*?"<>|\r\n]*$`
UnixPath string = `^((?:\/[a-zA-Z0-9\.\:]+(?:_[a-zA-Z0-9\:\.]+)*(?:\-[\:a-zA-Z0-9\.]+)*)+\/?)$`

View File

@@ -496,6 +496,12 @@ func IsIPv6(str string) bool {
return ip != nil && strings.Contains(str, ":")
}
// IsCIDR checks if the string is a valid CIDR notation (IPv4 & IPv6)
func IsCIDR(str string) bool {
_, _, err := net.ParseCIDR(str)
return err == nil
}
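A quick sanity check of the new helper, as a sketch (the values are illustrative):

    govalidator.IsCIDR("10.0.0.0/8")    // true
    govalidator.IsCIDR("2001:db8::/32") // true
    govalidator.IsCIDR("10.0.0.0")      // false: no prefix length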
// IsMAC checks if a string is a valid MAC address.
// Possible MAC formats:
// 01:23:45:67:89:ab

View File

@@ -2,7 +2,6 @@ package client
import (
"fmt"
"io/ioutil"
"net/http/httputil"
"github.com/aws/aws-sdk-go/aws"
@@ -104,8 +103,7 @@ func logRequest(r *request.Request) {
// Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's
// Body as a NoOpCloser and will not be reset after read by the HTTP
// client reader.
r.Body.Seek(r.BodyStart, 0)
r.HTTPRequest.Body = ioutil.NopCloser(r.Body)
r.ResetBody()
}
r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody)))

View File

@@ -137,9 +137,6 @@ type Config struct {
// accelerate enabled. If the bucket is not enabled for accelerate an error
// will be returned. The bucket name must be DNS compatible to also work
// with accelerate.
//
// Not compatible with UseDualStack requests will fail if both flags are
// specified.
S3UseAccelerate *bool
// Set this to `true` to disable the EC2Metadata client from overriding the

View File

@@ -10,9 +10,11 @@ import (
"regexp"
"runtime"
"strconv"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/request"
)
@@ -67,6 +69,34 @@ var SDKVersionUserAgentHandler = request.NamedHandler{
var reStatusCode = regexp.MustCompile(`^(\d{3})`)
// ValidateReqSigHandler is a request handler to ensure that the request's
// signature doesn't expire before it is sent. This can happen when a request
// is built and signed significantly before it is sent. Or significant delays
// occur when retrying requests that would cause the signature to expire.
var ValidateReqSigHandler = request.NamedHandler{
Name: "core.ValidateReqSigHandler",
Fn: func(r *request.Request) {
// Requests with anonymous credentials are never signed, so skip validation
if r.Config.Credentials == credentials.AnonymousCredentials {
return
}
signedTime := r.Time
if !r.LastSignedAt.IsZero() {
signedTime = r.LastSignedAt
}
// 10 minutes to allow for some clock skew/delays in transmission.
// Would be improved with aws/aws-sdk-go#423
if signedTime.Add(10 * time.Minute).After(time.Now()) {
return
}
fmt.Println("request expired, resigning")
r.Sign()
},
}
// SendHandler is a request handler to send service request using HTTP client.
var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) {
var err error

View File

@@ -34,7 +34,7 @@ var (
//
// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider.
// In this example EnvProvider will first check if any credentials are available
// vai the environment variables. If there are none ChainProvider will check
// via the environment variables. If there are none ChainProvider will check
// the next Provider in the list, EC2RoleProvider in this case. If EC2RoleProvider
// does not return any credentials ChainProvider will return the error
// ErrNoValidProvidersFoundInChain

View File

@@ -72,6 +72,7 @@ func Handlers() request.Handlers {
handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler)
handlers.Build.AfterEachFn = request.HandlerListStopOnError
handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
handlers.Send.PushBackNamed(corehandlers.ValidateReqSigHandler)
handlers.Send.PushBackNamed(corehandlers.SendHandler)
handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler)
handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler)

View File

@@ -3,6 +3,7 @@ package ec2metadata
import (
"encoding/json"
"fmt"
"net/http"
"path"
"strings"
"time"
@@ -27,6 +28,27 @@ func (c *EC2Metadata) GetMetadata(p string) (string, error) {
return output.Content, req.Send()
}
// GetUserData returns the userdata that was configured for the EC2 instance. If
// there is no user-data set up for the EC2 instance, a "NotFoundError" error
// code will be returned.
func (c *EC2Metadata) GetUserData() (string, error) {
op := &request.Operation{
Name: "GetUserData",
HTTPMethod: "GET",
HTTPPath: path.Join("/", "user-data"),
}
output := &metadataOutput{}
req := c.NewRequest(op, nil, output)
req.Handlers.UnmarshalError.PushBack(func(r *request.Request) {
if r.HTTPResponse.StatusCode == http.StatusNotFound {
r.Error = awserr.New("NotFoundError", "user-data not found", r.Error)
}
})
return output.Content, req.Send()
}
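A hedged sketch of calling GetUserData and distinguishing the not-found case; the session construction is assumed:

    svc := ec2metadata.New(session.New())
    userData, err := svc.GetUserData()
    if awsErr, ok := err.(awserr.Error); ok && awsErr.Code() == "NotFoundError" {
        // No user-data is configured for this instance.
    } else if err == nil {
        fmt.Println(userData)
    }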
// GetDynamicData uses the path provided to request information from the EC2
// instance metadata service for dynamic data. The content will be returned
// as a string, or an error if the request failed.

View File

@@ -9,7 +9,7 @@ import (
// with retrying requests
type offsetReader struct {
buf io.ReadSeeker
lock sync.RWMutex
lock sync.Mutex
closed bool
}
@@ -21,7 +21,8 @@ func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader {
return reader
}
// Close is a thread-safe close. Uses the write lock.
// Close will close the offset reader's access to
// the underlying io.ReadSeeker.
func (o *offsetReader) Close() error {
o.lock.Lock()
defer o.lock.Unlock()
@@ -29,10 +30,10 @@ func (o *offsetReader) Close() error {
return nil
}
// Read is a thread-safe read using a read lock.
// Read is a thread-safe read of the underlying io.ReadSeeker.
func (o *offsetReader) Read(p []byte) (int, error) {
o.lock.RLock()
defer o.lock.RUnlock()
o.lock.Lock()
defer o.lock.Unlock()
if o.closed {
return 0, io.EOF
@@ -41,6 +42,14 @@ func (o *offsetReader) Read(p []byte) (int, error) {
return o.buf.Read(p)
}
// Seek is a thread-safe seeking operation.
func (o *offsetReader) Seek(offset int64, whence int) (int64, error) {
o.lock.Lock()
defer o.lock.Unlock()
return o.buf.Seek(offset, whence)
}
// CloseAndCopy will return a new offsetReader with a copy of the old buffer
// and close the old buffer.
func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader {

View File

@@ -4,7 +4,6 @@ import (
"bytes"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
@@ -42,6 +41,12 @@ type Request struct {
LastSignedAt time.Time
built bool
// Need to persist an intermediate body between the input Body and HTTP
// request body because the HTTP Client's transport can maintain a reference
// to the HTTP request's body after the client has returned. This value is
// safe to use concurrently and rewraps the input Body for each HTTP request.
safeBody *offsetReader
}
// An Operation is the service API operation to be made.
@@ -135,8 +140,8 @@ func (r *Request) SetStringBody(s string) {
// SetReaderBody will set the request's body reader.
func (r *Request) SetReaderBody(reader io.ReadSeeker) {
r.HTTPRequest.Body = newOffsetReader(reader, 0)
r.Body = reader
r.ResetBody()
}
// Presign returns the request's signed URL. Error will be returned
@@ -220,6 +225,24 @@ func (r *Request) Sign() error {
return r.Error
}
// ResetBody rewinds the request body back to its starting position, and
// sets the HTTP Request body reference. When the body is read prior
// to being sent in the HTTP request it will need to be rewound.
func (r *Request) ResetBody() {
if r.safeBody != nil {
r.safeBody.Close()
}
r.safeBody = newOffsetReader(r.Body, r.BodyStart)
r.HTTPRequest.Body = r.safeBody
}
// GetBody will return an io.ReadSeeker of the Request's underlying
// input body with a concurrency safe wrapper.
func (r *Request) GetBody() io.ReadSeeker {
return r.safeBody
}
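For SDK callers the rewrapping is transparent: any operation with a seekable input body is rewound via ResetBody before each attempt. A hedged sketch, with a hypothetical session, bucket, key, and payload:

    svc := s3.New(sess)
    _, err := svc.PutObject(&s3.PutObjectInput{
        Bucket: aws.String("my-bucket"),
        Key:    aws.String("my-key"),
        // Body is an io.ReadSeeker; Send rewinds it safely on every retry.
        Body: bytes.NewReader(payload),
    })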
// Send will send the request returning error if errors are encountered.
//
// Send will sign the request prior to sending. All Send Handlers will
@@ -231,6 +254,8 @@ func (r *Request) Sign() error {
//
// readLoop() and getConn(req *Request, cm connectMethod)
// https://github.com/golang/go/blob/master/src/net/http/transport.go
//
// Send will not close the request.Request's body.
func (r *Request) Send() error {
for {
if aws.BoolValue(r.Retryable) {
@@ -239,21 +264,15 @@ func (r *Request) Send() error {
r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount))
}
var body io.ReadCloser
if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok {
body = reader.CloseAndCopy(r.BodyStart)
} else {
if r.Config.Logger != nil {
r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions")
}
r.Body.Seek(r.BodyStart, 0)
body = ioutil.NopCloser(r.Body)
}
// The previous http.Request will have a reference to the r.Body
// and the HTTP Client's Transport may still be reading from
// the request's body even after the Client's Do has returned.
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, nil)
r.ResetBody()
r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body)
// Closing response body to ensure that no response body is leaked
// between retry attempts.
if r.HTTPResponse != nil && r.HTTPResponse.Body != nil {
// Closing response body. Since we are setting a new request to send off, this
// response will get squashed and leaked.
r.HTTPResponse.Body.Close()
}
}
@@ -281,7 +300,6 @@ func (r *Request) Send() error {
debugLogReqError(r, "Send Request", true, err)
continue
}
r.Handlers.UnmarshalMeta.Run(r)
r.Handlers.ValidateResponse.Run(r)
if r.Error != nil {

View File

@@ -66,7 +66,7 @@ through code instead of being driven by environment variables only.
Use NewSessionWithOptions when you want to provide the config profile, or
override the shared config state (AWS_SDK_LOAD_CONFIG).
// Equivalent to session.New
// Equivalent to session.NewSession()
sess, err := session.NewSessionWithOptions(session.Options{})
// Specify profile to load for the session's config

View File

@@ -2,7 +2,7 @@ package session
import (
"fmt"
"os"
"io/ioutil"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/credentials"
@@ -105,12 +105,13 @@ func loadSharedConfigIniFiles(filenames []string) ([]sharedConfigFile, error) {
files := make([]sharedConfigFile, 0, len(filenames))
for _, filename := range filenames {
if _, err := os.Stat(filename); os.IsNotExist(err) {
// Trim files from the list that don't exist.
b, err := ioutil.ReadFile(filename)
if err != nil {
// Skip files which can't be opened and read for whatever reason
continue
}
f, err := ini.Load(filename)
f, err := ini.Load(b)
if err != nil {
return nil, SharedConfigLoadError{Filename: filename}
}

View File

@@ -0,0 +1,24 @@
// +build go1.5
package v4
import (
"net/url"
"strings"
)
func getURIPath(u *url.URL) string {
var uri string
if len(u.Opaque) > 0 {
uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
} else {
uri = u.EscapedPath()
}
if len(uri) == 0 {
uri = "/"
}
return uri
}

View File

@@ -0,0 +1,24 @@
// +build !go1.5
package v4
import (
"net/url"
"strings"
)
func getURIPath(u *url.URL) string {
var uri string
if len(u.Opaque) > 0 {
uri = "/" + strings.Join(strings.Split(u.Opaque, "/")[3:], "/")
} else {
uri = u.Path
}
if len(uri) == 0 {
uri = "/"
}
return uri
}

View File

@@ -2,6 +2,48 @@
//
// Provides request signing for requests that need to be signed with
// AWS V4 Signatures.
//
// Standalone Signer
//
// Generally using the signer outside of the SDK should not require any additional
// logic when using Go v1.5 or higher. The signer does this by taking advantage
// of the URL.EscapedPath method. If your request URI requires additional escaping
// you may need to use the URL.Opaque to define what the raw URI should be sent
// to the service as.
//
// The signer will first check the URL.Opaque field, and use its value if set.
// The signer does require the URL.Opaque field to be set in the form of:
//
// "//<hostname>/<path>"
//
// // e.g.
// "//example.com/some/path"
//
// The leading "//" and hostname are required or the URL.Opaque escaping will
// not work correctly.
//
// If URL.Opaque is not set the signer will fall back to the URL.EscapedPath()
// method and use the returned value. If you're using Go v1.4 you must set
// URL.Opaque if the URI path needs escaping. If URL.Opaque is not set with
// Go v1.5 the signer will fall back to URL.Path.
//
// AWS v4 signature validation requires that the canonical string's URI path
// element must be the URI escaped form of the HTTP request's path.
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
//
// The Go HTTP client will perform escaping automatically on the request. Some
// of this escaping may cause signature validation errors because the HTTP
// request differs from the URI path or query the signature was generated for.
// https://golang.org/pkg/net/url/#URL.EscapedPath
//
// Because of this, when using the signer outside of the SDK it is recommended
// to explicitly escape the request prior to signing; this will help prevent
// signature validation errors. This can be done by setting
// the URL.Opaque or URL.RawPath. The SDK will use URL.Opaque first and then
// call URL.EscapedPath() if Opaque is not set.
//
// Test `TestStandaloneSign` provides a complete example of using the signer
// outside of the SDK and pre-escaping the URI path.
package v4
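A hedged sketch of standalone signing with a pre-escaped path via URL.Opaque, as described above; the credentials, service name, and endpoint are placeholders:

    signer := v4.NewSigner(credentials.NewStaticCredentials("AKID", "SECRET", ""))
    req, _ := http.NewRequest("GET", "https://service.us-east-1.amazonaws.com", nil)
    // "//<hostname>/<path>" form, with the path already URI-escaped.
    req.URL.Opaque = "//service.us-east-1.amazonaws.com/some%20path"
    _, err := signer.Sign(req, nil, "service", "us-east-1", time.Now())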
import (
@@ -120,6 +162,15 @@ type Signer struct {
// request's query string.
DisableHeaderHoisting bool
// Disables the automatic escaping of the URI path of the request for the
// signature's canonical string's path. For services that do not need additional
// escaping, use this to disable the signer escaping the path.
//
// S3 is an example of a service that does not need additional escaping.
//
// http://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
DisableURIPathEscaping bool
// currentTimeFn returns the time value which represents the current time.
// This value should only be used for testing. If it is nil the default
// time.Now will be used.
@@ -151,6 +202,8 @@ type signingCtx struct {
ExpireTime time.Duration
SignedHeaderVals http.Header
DisableURIPathEscaping bool
credValues credentials.Value
isPresign bool
formattedTime string
@@ -236,22 +289,18 @@ func (v4 Signer) signWithBody(r *http.Request, body io.ReadSeeker, service, regi
}
ctx := &signingCtx{
Request: r,
Body: body,
Query: r.URL.Query(),
Time: signTime,
ExpireTime: exp,
isPresign: exp != 0,
ServiceName: service,
Region: region,
Request: r,
Body: body,
Query: r.URL.Query(),
Time: signTime,
ExpireTime: exp,
isPresign: exp != 0,
ServiceName: service,
Region: region,
DisableURIPathEscaping: v4.DisableURIPathEscaping,
}
if ctx.isRequestSigned() {
if !v4.Credentials.IsExpired() && currentTimeFn().Before(ctx.Time.Add(10*time.Minute)) {
// If the request is already signed, and the credentials have not
// expired, and the request is not too old ignore the signing request.
return ctx.SignedHeaderVals, nil
}
ctx.Time = currentTimeFn()
ctx.handlePresignRemoval()
}
@@ -359,6 +408,10 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
v4.Logger = req.Config.Logger
v4.DisableHeaderHoisting = req.NotHoist
v4.currentTimeFn = curTimeFn
if name == "s3" {
// S3 service should not have any escaping applied
v4.DisableURIPathEscaping = true
}
})
signingTime := req.Time
@@ -366,7 +419,9 @@ func signSDKRequestWithCurrTime(req *request.Request, curTimeFn func() time.Time
signingTime = req.LastSignedAt
}
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.Body, name, region, req.ExpireTime, signingTime)
signedHeaders, err := v4.signWithBody(req.HTTPRequest, req.GetBody(),
name, region, req.ExpireTime, signingTime,
)
if err != nil {
req.Error = err
req.SignedHeaderVals = nil
@@ -513,17 +568,10 @@ func (ctx *signingCtx) buildCanonicalHeaders(r rule, header http.Header) {
func (ctx *signingCtx) buildCanonicalString() {
ctx.Request.URL.RawQuery = strings.Replace(ctx.Query.Encode(), "+", "%20", -1)
uri := ctx.Request.URL.Opaque
if uri != "" {
uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/")
} else {
uri = ctx.Request.URL.Path
}
if uri == "" {
uri = "/"
}
if ctx.ServiceName != "s3" {
uri := getURIPath(ctx.Request.URL)
if !ctx.DisableURIPathEscaping {
uri = rest.EscapePath(uri, false)
}

View File

@@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.4.14"
const SDKVersion = "1.5.0"

View File

@@ -1,7 +1,7 @@
// Package endpoints validates regional endpoints for services.
package endpoints
//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
//go:generate go run -tags codegen ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
//go:generate gofmt -s -w endpoints_map.go
import (

View File

@@ -23,6 +23,10 @@
"us-gov-west-1/ec2metadata": {
"endpoint": "http://169.254.169.254/latest"
},
"*/budgets": {
"endpoint": "budgets.amazonaws.com",
"signingRegion": "us-east-1"
},
"*/cloudfront": {
"endpoint": "cloudfront.amazonaws.com",
"signingRegion": "us-east-1"

View File

@@ -18,6 +18,10 @@ var endpointsMap = endpointStruct{
"*/*": {
Endpoint: "{service}.{region}.amazonaws.com",
},
"*/budgets": {
Endpoint: "budgets.amazonaws.com",
SigningRegion: "us-east-1",
},
"*/cloudfront": {
Endpoint: "cloudfront.amazonaws.com",
SigningRegion: "us-east-1",

View File

@@ -1,7 +1,7 @@
// Package ec2query provides serialization of AWS EC2 requests and responses.
package ec2query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/ec2.json build_test.go
import (
"net/url"

View File

@@ -1,6 +1,6 @@
package ec2query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/ec2.json unmarshal_test.go
import (
"encoding/xml"

View File

@@ -2,8 +2,8 @@
// requests and responses.
package jsonrpc
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go
import (
"encoding/json"

View File

@@ -1,7 +1,7 @@
// Package query provides serialization of AWS query requests, and responses.
package query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/query.json build_test.go
import (
"net/url"

View File

@@ -1,6 +1,6 @@
package query
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/query.json unmarshal_test.go
import (
"encoding/xml"

View File

@@ -2,8 +2,8 @@
// requests and responses.
package restxml
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/rest-xml.json build_test.go
//go:generate go run -tags codegen ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/rest-xml.json unmarshal_test.go
import (
"bytes"

File diff suppressed because it is too large

View File

@@ -94,6 +94,22 @@ func UnmarshalList(l []*dynamodb.AttributeValue, out interface{}) error {
return NewDecoder().Decode(&dynamodb.AttributeValue{L: l}, out)
}
// UnmarshalListOfMaps is an alias for the Unmarshal func which unmarshals a
// slice of maps of attribute values.
//
// This is useful when you need to unmarshal the Items from a DynamoDB
// Query API call.
//
// The output value provided must be a non-nil pointer.
func UnmarshalListOfMaps(l []map[string]*dynamodb.AttributeValue, out interface{}) error {
items := make([]*dynamodb.AttributeValue, len(l))
for i, m := range l {
items[i] = &dynamodb.AttributeValue{M: m}
}
return UnmarshalList(items, out)
}
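A hedged usage sketch; the Record type and resp (a *dynamodb.QueryOutput from a prior Query call) are hypothetical:

    type Record struct {
        ID    string
        Score int
    }
    var records []Record
    if err := dynamodbattribute.UnmarshalListOfMaps(resp.Items, &records); err != nil {
        log.Fatal(err)
    }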
// A Decoder provides unmarshaling AttributeValues to Go value types.
type Decoder struct {
MarshalOptions

View File

@@ -33,7 +33,7 @@ func (t *tag) parseJSONTag(structTag reflect.StructTag) {
}
func (t *tag) parseTagStr(tagStr string) {
parts := strings.SplitN(tagStr, ",", 2)
parts := strings.Split(tagStr, ",")
if len(parts) == 0 {
return
}

View File

@@ -16,22 +16,22 @@ import (
//
// This guide is intended for use with the following DynamoDB documentation:
//
// Amazon DynamoDB Getting Started Guide (http://docs.aws.amazon.com/amazondynamodb/latest/gettingstartedguide/)
// - provides hands-on exercises that help you learn the basics of working with
// DynamoDB. If you are new to DynamoDB, we recommend that you begin with the
// Getting Started Guide.
// * Amazon DynamoDB Getting Started Guide (http://docs.aws.amazon.com/amazondynamodb/latest/gettingstartedguide/)
// - provides hands-on exercises that help you learn the basics of working
// with DynamoDB. If you are new to DynamoDB, we recommend that you begin
// with the Getting Started Guide.
//
// Amazon DynamoDB Developer Guide (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/)
// - contains detailed information about DynamoDB concepts, usage, and best
// practices.
// * Amazon DynamoDB Developer Guide (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/)
// - contains detailed information about DynamoDB concepts, usage, and best
// practices.
//
// Amazon DynamoDB Streams API Reference (http://docs.aws.amazon.com/dynamodbstreams/latest/APIReference/)
// - provides descriptions and samples of the DynamoDB Streams API. (For more
// information, see Capturing Table Activity with DynamoDB Streams (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html)
// in the Amazon DynamoDB Developer Guide.)
// * Amazon DynamoDB Streams API Reference (http://docs.aws.amazon.com/dynamodbstreams/latest/APIReference/)
// - provides descriptions and samples of the DynamoDB Streams API. (For
// more information, see Capturing Table Activity with DynamoDB Streams (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/Streams.html)
// in the Amazon DynamoDB Developer Guide.)
//
// Instead of making the requests to the low-level DynamoDB API directly
// from your application, we recommend that you use the AWS Software Development
// Instead of making the requests to the low-level DynamoDB API directly from
// your application, we recommend that you use the AWS Software Development
// Kits (SDKs). The easy-to-use libraries in the AWS SDKs make it unnecessary
// to call the low-level DynamoDB API directly from your application. The libraries
// take care of request authentication, serialization, and connection management.
@@ -46,84 +46,82 @@ import (
// The following are short descriptions of each low-level API action, organized
// by function.
//
// Managing Tables
// Managing Tables
//
// CreateTable - Creates a table with user-specified provisioned throughput
// settings. You must define a primary key for the table - either a simple primary
// key (partition key), or a composite primary key (partition key and sort key).
// Optionally, you can create one or more secondary indexes, which provide fast
// data access using non-key attributes.
// * CreateTable - Creates a table with user-specified provisioned throughput
// settings. You must define a primary key for the table - either a simple
// primary key (partition key), or a composite primary key (partition key
// and sort key). Optionally, you can create one or more secondary indexes,
// which provide fast data access using non-key attributes.
//
// DescribeTable - Returns metadata for a table, such as table size, status,
// and index information.
// * DescribeTable - Returns metadata for a table, such as table size, status,
// and index information.
//
// UpdateTable - Modifies the provisioned throughput settings for a table.
// Optionally, you can modify the provisioned throughput settings for global
// secondary indexes on the table.
// * UpdateTable - Modifies the provisioned throughput settings for a table.
// Optionally, you can modify the provisioned throughput settings for global
// secondary indexes on the table.
//
// ListTables - Returns a list of all tables associated with the current
// AWS account and endpoint.
// * ListTables - Returns a list of all tables associated with the current
// AWS account and endpoint.
//
// DeleteTable - Deletes a table and all of its indexes.
// * DeleteTable - Deletes a table and all of its indexes.
//
// For conceptual information about managing tables, see Working with Tables
// For conceptual information about managing tables, see Working with Tables
// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithTables.html)
// in the Amazon DynamoDB Developer Guide.
//
// Reading Data
// Reading Data
//
// GetItem - Returns a set of attributes for the item that has a given primary
// key. By default, GetItem performs an eventually consistent read; however,
// applications can request a strongly consistent read instead.
// * GetItem - Returns a set of attributes for the item that has a given
// primary key. By default, GetItem performs an eventually consistent read;
// however, applications can request a strongly consistent read instead.
//
// BatchGetItem - Performs multiple GetItem requests for data items using
// their primary keys, from one table or multiple tables. The response from
// BatchGetItem has a size limit of 16 MB and returns a maximum of 100 items.
// Both eventually consistent and strongly consistent reads can be used.
// * BatchGetItem - Performs multiple GetItem requests for data items using
// their primary keys, from one table or multiple tables. The response from
// BatchGetItem has a size limit of 16 MB and returns a maximum of 100 items.
// Both eventually consistent and strongly consistent reads can be used.
//
// Query - Returns one or more items from a table or a secondary index.
// You must provide a specific value for the partition key. You can narrow the
// scope of the query using comparison operators against a sort key value, or
// on the index key. Query supports either eventual or strong consistency. A
// single response has a size limit of 1 MB.
// * Query - Returns one or more items from a table or a secondary index.
// You must provide a specific value for the partition key. You can narrow
// the scope of the query using comparison operators against a sort key value,
// or on the index key. Query supports either eventual or strong consistency.
// A single response has a size limit of 1 MB.
//
// Scan - Reads every item in a table; the result set is eventually consistent.
// You can limit the number of items returned by filtering the data attributes,
// using conditional expressions. Scan can be used to enable ad-hoc querying
// of a table against non-key attributes; however, since this is a full table
// scan without using an index, Scan should not be used for any application
// query use case that requires predictable performance.
// * Scan - Reads every item in a table; the result set is eventually consistent.
// You can limit the number of items returned by filtering the data attributes,
// using conditional expressions. Scan can be used to enable ad-hoc querying
// of a table against non-key attributes; however, since this is a full table
// scan without using an index, Scan should not be used for any application
// query use case that requires predictable performance.
//
// For conceptual information about reading data, see Working with Items
// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
// For conceptual information about reading data, see Working with Items (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
// and Query and Scan Operations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
// in the Amazon DynamoDB Developer Guide.
//
// Modifying Data
// Modifying Data
//
// PutItem - Creates a new item, or replaces an existing item with a new
// item (including all the attributes). By default, if an item in the table
// already exists with the same primary key, the new item completely replaces
// the existing item. You can use conditional operators to replace an item only
// if its attribute values match certain conditions, or to insert a new item
// only if that item doesn't already exist.
// * PutItem - Creates a new item, or replaces an existing item with a new
// item (including all the attributes). By default, if an item in the table
// already exists with the same primary key, the new item completely replaces
// the existing item. You can use conditional operators to replace an item
// only if its attribute values match certain conditions, or to insert a
// new item only if that item doesn't already exist.
//
// UpdateItem - Modifies the attributes of an existing item. You can also
// use conditional operators to perform an update only if the item's attribute
// values match certain conditions.
// * UpdateItem - Modifies the attributes of an existing item. You can also
// use conditional operators to perform an update only if the item's attribute
// values match certain conditions.
//
// DeleteItem - Deletes an item in a table by primary key. You can use conditional
// operators to perform a delete an item only if the item's attribute values
// match certain conditions.
// * DeleteItem - Deletes an item in a table by primary key. You can use
// conditional operators to perform a delete an item only if the item's attribute
// values match certain conditions.
//
// BatchWriteItem - Performs multiple PutItem and DeleteItem requests across
// multiple tables in a single request. A failure of any request(s) in the batch
// will not cause the entire BatchWriteItem operation to fail. Supports batches
// of up to 25 items to put or delete, with a maximum total request size of
// 16 MB.
// * BatchWriteItem - Performs multiple PutItem and DeleteItem requests across
// multiple tables in a single request. A failure of any request(s) in the
// batch will not cause the entire BatchWriteItem operation to fail. Supports
// batches of up to 25 items to put or delete, with a maximum total request
// size of 16 MB.
//
// For conceptual information about modifying data, see Working with Items
// (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
// For conceptual information about modifying data, see Working with Items (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/WorkingWithItems.html)
// and Query and Scan Operations (http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/QueryAndScan.html)
// in the Amazon DynamoDB Developer Guide.
// The service client's operations are safe to be used concurrently.

View File

@@ -6,6 +6,10 @@ import (
"github.com/aws/aws-sdk-go/private/waiter"
)
// WaitUntilTableExists uses the DynamoDB API operation
// DescribeTable to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeTable",
@@ -35,6 +39,10 @@ func (c *DynamoDB) WaitUntilTableExists(input *DescribeTableInput) error {
return w.Wait()
}
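A hedged sketch of the waiter in use; the session and table name are placeholders:

    svc := dynamodb.New(sess)
    err := svc.WaitUntilTableExists(&dynamodb.DescribeTableInput{
        TableName: aws.String("my-table"),
    })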
// WaitUntilTableNotExists uses the DynamoDB API operation
// DescribeTable to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *DynamoDB) WaitUntilTableNotExists(input *DescribeTableInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeTable",

File diff suppressed because it is too large

View File

@@ -6,6 +6,10 @@ import (
"github.com/aws/aws-sdk-go/private/waiter"
)
// WaitUntilBundleTaskComplete uses the Amazon EC2 API operation
// DescribeBundleTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeBundleTasks",
@@ -35,6 +39,10 @@ func (c *EC2) WaitUntilBundleTaskComplete(input *DescribeBundleTasksInput) error
return w.Wait()
}
// WaitUntilConversionTaskCancelled uses the Amazon EC2 API operation
// DescribeConversionTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeConversionTasks",
@@ -58,6 +66,10 @@ func (c *EC2) WaitUntilConversionTaskCancelled(input *DescribeConversionTasksInp
return w.Wait()
}
// WaitUntilConversionTaskCompleted uses the Amazon EC2 API operation
// DescribeConversionTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeConversionTasks",
@@ -93,6 +105,10 @@ func (c *EC2) WaitUntilConversionTaskCompleted(input *DescribeConversionTasksInp
return w.Wait()
}
// WaitUntilConversionTaskDeleted uses the Amazon EC2 API operation
// DescribeConversionTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeConversionTasks",
@@ -116,6 +132,10 @@ func (c *EC2) WaitUntilConversionTaskDeleted(input *DescribeConversionTasksInput
return w.Wait()
}
// WaitUntilCustomerGatewayAvailable uses the Amazon EC2 API operation
// DescribeCustomerGateways to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeCustomerGateways",
@@ -151,6 +171,10 @@ func (c *EC2) WaitUntilCustomerGatewayAvailable(input *DescribeCustomerGatewaysI
return w.Wait()
}
// WaitUntilExportTaskCancelled uses the Amazon EC2 API operation
// DescribeExportTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeExportTasks",
@@ -174,6 +198,10 @@ func (c *EC2) WaitUntilExportTaskCancelled(input *DescribeExportTasksInput) erro
return w.Wait()
}
// WaitUntilExportTaskCompleted uses the Amazon EC2 API operation
// DescribeExportTasks to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeExportTasks",
@@ -197,6 +225,10 @@ func (c *EC2) WaitUntilExportTaskCompleted(input *DescribeExportTasksInput) erro
return w.Wait()
}
// WaitUntilImageAvailable uses the Amazon EC2 API operation
// DescribeImages to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeImages",
@@ -226,6 +258,10 @@ func (c *EC2) WaitUntilImageAvailable(input *DescribeImagesInput) error {
return w.Wait()
}
// WaitUntilImageExists uses the Amazon EC2 API operation
// DescribeImages to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilImageExists(input *DescribeImagesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeImages",
@@ -255,6 +291,10 @@ func (c *EC2) WaitUntilImageExists(input *DescribeImagesInput) error {
return w.Wait()
}
// WaitUntilInstanceExists uses the Amazon EC2 API operation
// DescribeInstances to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeInstances",
@@ -284,6 +324,10 @@ func (c *EC2) WaitUntilInstanceExists(input *DescribeInstancesInput) error {
return w.Wait()
}
// WaitUntilInstanceRunning uses the Amazon EC2 API operation
// DescribeInstances to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeInstances",
@@ -331,6 +375,10 @@ func (c *EC2) WaitUntilInstanceRunning(input *DescribeInstancesInput) error {
return w.Wait()
}
// WaitUntilInstanceStatusOk uses the Amazon EC2 API operation
// DescribeInstanceStatus to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeInstanceStatus",
@@ -360,6 +408,10 @@ func (c *EC2) WaitUntilInstanceStatusOk(input *DescribeInstanceStatusInput) erro
return w.Wait()
}
// WaitUntilInstanceStopped uses the Amazon EC2 API operation
// DescribeInstances to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeInstances",
@@ -395,6 +447,10 @@ func (c *EC2) WaitUntilInstanceStopped(input *DescribeInstancesInput) error {
return w.Wait()
}
// WaitUntilInstanceTerminated uses the Amazon EC2 API operation
// DescribeInstances to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeInstances",
@@ -430,6 +486,10 @@ func (c *EC2) WaitUntilInstanceTerminated(input *DescribeInstancesInput) error {
return w.Wait()
}
// WaitUntilKeyPairExists uses the Amazon EC2 API operation
// DescribeKeyPairs to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeKeyPairs",
@@ -459,6 +519,10 @@ func (c *EC2) WaitUntilKeyPairExists(input *DescribeKeyPairsInput) error {
return w.Wait()
}
// WaitUntilNatGatewayAvailable uses the Amazon EC2 API operation
// DescribeNatGateways to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilNatGatewayAvailable(input *DescribeNatGatewaysInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeNatGateways",
@@ -506,6 +570,10 @@ func (c *EC2) WaitUntilNatGatewayAvailable(input *DescribeNatGatewaysInput) erro
return w.Wait()
}
// WaitUntilNetworkInterfaceAvailable uses the Amazon EC2 API operation
// DescribeNetworkInterfaces to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterfacesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeNetworkInterfaces",
@@ -535,6 +603,10 @@ func (c *EC2) WaitUntilNetworkInterfaceAvailable(input *DescribeNetworkInterface
return w.Wait()
}
// WaitUntilPasswordDataAvailable uses the Amazon EC2 API operation
// GetPasswordData to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error {
waiterCfg := waiter.Config{
Operation: "GetPasswordData",
@@ -558,6 +630,10 @@ func (c *EC2) WaitUntilPasswordDataAvailable(input *GetPasswordDataInput) error
return w.Wait()
}
// WaitUntilSnapshotCompleted uses the Amazon EC2 API operation
// DescribeSnapshots to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeSnapshots",
@@ -581,6 +657,10 @@ func (c *EC2) WaitUntilSnapshotCompleted(input *DescribeSnapshotsInput) error {
return w.Wait()
}
// WaitUntilSpotInstanceRequestFulfilled uses the Amazon EC2 API operation
// DescribeSpotInstanceRequests to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceRequestsInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeSpotInstanceRequests",
@@ -628,6 +708,10 @@ func (c *EC2) WaitUntilSpotInstanceRequestFulfilled(input *DescribeSpotInstanceR
return w.Wait()
}
// WaitUntilSubnetAvailable uses the Amazon EC2 API operation
// DescribeSubnets to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeSubnets",
@@ -651,6 +735,10 @@ func (c *EC2) WaitUntilSubnetAvailable(input *DescribeSubnetsInput) error {
return w.Wait()
}
// WaitUntilSystemStatusOk uses the Amazon EC2 API operation
// DescribeInstanceStatus to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeInstanceStatus",
@@ -674,6 +762,10 @@ func (c *EC2) WaitUntilSystemStatusOk(input *DescribeInstanceStatusInput) error
return w.Wait()
}
// WaitUntilVolumeAvailable uses the Amazon EC2 API operation
// DescribeVolumes to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeVolumes",
@@ -703,6 +795,10 @@ func (c *EC2) WaitUntilVolumeAvailable(input *DescribeVolumesInput) error {
return w.Wait()
}
// WaitUntilVolumeDeleted uses the Amazon EC2 API operation
// DescribeVolumes to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeVolumes",
@@ -732,6 +828,10 @@ func (c *EC2) WaitUntilVolumeDeleted(input *DescribeVolumesInput) error {
return w.Wait()
}
// WaitUntilVolumeInUse uses the Amazon EC2 API operation
// DescribeVolumes to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeVolumes",
@@ -761,6 +861,10 @@ func (c *EC2) WaitUntilVolumeInUse(input *DescribeVolumesInput) error {
return w.Wait()
}
// WaitUntilVpcAvailable uses the Amazon EC2 API operation
// DescribeVpcs to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeVpcs",
@@ -784,6 +888,10 @@ func (c *EC2) WaitUntilVpcAvailable(input *DescribeVpcsInput) error {
return w.Wait()
}
// WaitUntilVpcExists uses the Amazon EC2 API operation
// DescribeVpcs to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilVpcExists(input *DescribeVpcsInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeVpcs",
@@ -813,6 +921,10 @@ func (c *EC2) WaitUntilVpcExists(input *DescribeVpcsInput) error {
return w.Wait()
}
// WaitUntilVpcPeeringConnectionExists uses the Amazon EC2 API operation
// DescribeVpcPeeringConnections to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilVpcPeeringConnectionExists(input *DescribeVpcPeeringConnectionsInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeVpcPeeringConnections",
@@ -842,6 +954,10 @@ func (c *EC2) WaitUntilVpcPeeringConnectionExists(input *DescribeVpcPeeringConne
return w.Wait()
}
// WaitUntilVpnConnectionAvailable uses the Amazon EC2 API operation
// DescribeVpnConnections to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeVpnConnections",
@@ -877,6 +993,10 @@ func (c *EC2) WaitUntilVpnConnectionAvailable(input *DescribeVpnConnectionsInput
return w.Wait()
}
// WaitUntilVpnConnectionDeleted uses the Amazon EC2 API operation
// DescribeVpnConnections to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window an error will
// be returned.
func (c *EC2) WaitUntilVpnConnectionDeleted(input *DescribeVpnConnectionsInput) error {
waiterCfg := waiter.Config{
Operation: "DescribeVpnConnections",

File diff suppressed because it is too large

View File

@@ -17,16 +17,15 @@ import (
// information about IAM, see AWS Identity and Access Management (IAM) (http://aws.amazon.com/iam/).
// For the user guide for IAM, see Using IAM (http://docs.aws.amazon.com/IAM/latest/UserGuide/).
//
// AWS provides SDKs that consist of libraries and sample code for various
// programming languages and platforms (Java, Ruby, .NET, iOS, Android, etc.).
// The SDKs provide a convenient way to create programmatic access to IAM and
// AWS. For example, the SDKs take care of tasks such as cryptographically signing
// requests (see below), managing errors, and retrying requests automatically.
// For information about the AWS SDKs, including how to download and install
// them, see the Tools for Amazon Web Services (http://aws.amazon.com/tools/)
// page.
// AWS provides SDKs that consist of libraries and sample code for various programming
// languages and platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs
// provide a convenient way to create programmatic access to IAM and AWS. For
// example, the SDKs take care of tasks such as cryptographically signing requests
// (see below), managing errors, and retrying requests automatically. For information
// about the AWS SDKs, including how to download and install them, see the Tools
// for Amazon Web Services (http://aws.amazon.com/tools/) page.
//
// We recommend that you use the AWS SDKs to make programmatic API calls to
// We recommend that you use the AWS SDKs to make programmatic API calls to
// IAM. However, you can also use the IAM Query API to make direct calls to
// the IAM web service. To learn more about the IAM Query API, see Making Query
// Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
@@ -35,12 +34,12 @@ import (
// for others. However, GET requests are subject to the size limitation of a
// URL. Therefore, for operations that require larger sizes, use a POST request.
//
// Signing Requests
//
// Requests must be signed using an access key ID and a secret access key.
// We strongly recommend that you do not use your AWS account access key ID
// and secret access key for everyday work with IAM. You can use the access
// key ID and secret access key for an IAM user or you can use the AWS Security
// Requests must be signed using an access key ID and a secret access key. We
// strongly recommend that you do not use your AWS account access key ID and
// secret access key for everyday work with IAM. You can use the access key
// ID and secret access key for an IAM user or you can use the AWS Security
// Token Service to generate temporary security credentials and use those to
// sign requests.
//
@@ -50,21 +49,21 @@ import (
// now require Signature Version 4. The documentation for operations that require
// version 4 indicates this requirement.
//
// Additional Resources
//
// For more information, see the following:
//
// AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html).
// This topic provides general information about the types of credentials used
// for accessing AWS.
// * AWS Security Credentials (http://docs.aws.amazon.com/general/latest/gr/aws-security-credentials.html).
// This topic provides general information about the types of credentials
// used for accessing AWS.
//
// IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html).
// This topic presents a list of suggestions for using the IAM service to help
// secure your AWS resources.
// * IAM Best Practices (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAMBestPractices.html).
// This topic presents a list of suggestions for using the IAM service to
// help secure your AWS resources.
//
// Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html).
// This set of topics walks you through the process of signing a request using
// an access key ID and secret access key.
// * Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html).
// This set of topics walks you through the process of signing a request using
// an access key ID and secret access key.
// The service client's operations are safe to be used concurrently.
// It is not safe to mutate any of the client's properties though.
type IAM struct {

View File

@@ -6,6 +6,10 @@ import (
"github.com/aws/aws-sdk-go/private/waiter"
)
// WaitUntilInstanceProfileExists uses the IAM API operation
// GetInstanceProfile to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *IAM) WaitUntilInstanceProfileExists(input *GetInstanceProfileInput) error {
waiterCfg := waiter.Config{
Operation: "GetInstanceProfile",
@@ -35,6 +39,10 @@ func (c *IAM) WaitUntilInstanceProfileExists(input *GetInstanceProfileInput) err
return w.Wait()
}
// WaitUntilUserExists uses the IAM API operation
// GetUser to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *IAM) WaitUntilUserExists(input *GetUserInput) error {
waiterCfg := waiter.Config{
Operation: "GetUser",

File diff suppressed because it is too large

View File

@@ -1,6 +1,7 @@
package s3
import (
"bytes"
"fmt"
"net/url"
"regexp"
@@ -37,14 +38,6 @@ var accelerateOpBlacklist = operationBlacklist{
func updateEndpointForS3Config(r *request.Request) {
forceHostStyle := aws.BoolValue(r.Config.S3ForcePathStyle)
accelerate := aws.BoolValue(r.Config.S3UseAccelerate)
useDualStack := aws.BoolValue(r.Config.UseDualStack)
if useDualStack && accelerate {
r.Error = awserr.New("InvalidParameterException",
fmt.Sprintf("configuration aws.Config.UseDualStack is not compatible with aws.Config.Accelerate"),
nil)
return
}
if accelerate && accelerateOpBlacklist.Continue(r) {
if forceHostStyle {
@@ -75,6 +68,10 @@ func updateEndpointForHostStyle(r *request.Request) {
moveBucketToHost(r.HTTPRequest.URL, bucket)
}
var (
accelElem = []byte("s3-accelerate.dualstack.")
)
func updateEndpointForAccelerate(r *request.Request) {
bucket, ok := bucketNameFromReqParams(r.Params)
if !ok {
@@ -93,6 +90,22 @@ func updateEndpointForAccelerate(r *request.Request) {
// Change endpoint from s3(-[a-z0-9-])?.amazonaws.com to s3-accelerate.amazonaws.com
r.HTTPRequest.URL.Host = replaceHostRegion(r.HTTPRequest.URL.Host, "accelerate")
if aws.BoolValue(r.Config.UseDualStack) {
host := []byte(r.HTTPRequest.URL.Host)
// Strip region from hostname
if idx := bytes.Index(host, accelElem); idx >= 0 {
start := idx + len(accelElem)
if end := bytes.IndexByte(host[start:], '.'); end >= 0 {
end += start + 1
copy(host[start:], host[end:])
host = host[:len(host)-(end-start)]
r.HTTPRequest.URL.Host = string(host)
}
}
}
moveBucketToHost(r.HTTPRequest.URL, bucket)
}
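The dual-stack branch rewrites hosts of the form s3-accelerate.dualstack.&lt;region&gt;.amazonaws.com by splicing the region label out of the byte slice in place. The same manipulation, extracted into a runnable sketch:

```go
package main

import (
	"bytes"
	"fmt"
)

var accelElem = []byte("s3-accelerate.dualstack.")

// stripRegion mirrors the logic above: drop the region label that
// immediately follows "s3-accelerate.dualstack." in the endpoint host.
func stripRegion(hostStr string) string {
	host := []byte(hostStr)
	if idx := bytes.Index(host, accelElem); idx >= 0 {
		start := idx + len(accelElem)
		if end := bytes.IndexByte(host[start:], '.'); end >= 0 {
			end += start + 1
			copy(host[start:], host[end:])
			host = host[:len(host)-(end-start)]
		}
	}
	return string(host)
}

func main() {
	fmt.Println(stripRegion("mybucket.s3-accelerate.dualstack.us-west-2.amazonaws.com"))
	// Output: mybucket.s3-accelerate.dualstack.amazonaws.com
}
```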

View File

@@ -6,6 +6,10 @@ import (
"github.com/aws/aws-sdk-go/private/waiter"
)
// WaitUntilBucketExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
waiterCfg := waiter.Config{
Operation: "HeadBucket",
@@ -47,6 +51,10 @@ func (c *S3) WaitUntilBucketExists(input *HeadBucketInput) error {
return w.Wait()
}
// WaitUntilBucketNotExists uses the Amazon S3 API operation
// HeadBucket to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
waiterCfg := waiter.Config{
Operation: "HeadBucket",
@@ -70,6 +78,10 @@ func (c *S3) WaitUntilBucketNotExists(input *HeadBucketInput) error {
return w.Wait()
}
// WaitUntilObjectExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
waiterCfg := waiter.Config{
Operation: "HeadObject",
@@ -99,6 +111,10 @@ func (c *S3) WaitUntilObjectExists(input *HeadObjectInput) error {
return w.Wait()
}
// WaitUntilObjectNotExists uses the Amazon S3 API operation
// HeadObject to wait for a condition to be met before returning.
// If the condition is not met within the max attempt window, an error will
// be returned.
func (c *S3) WaitUntilObjectNotExists(input *HeadObjectInput) error {
waiterCfg := waiter.Config{
Operation: "HeadObject",
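Same pattern as the EC2 waiters earlier in this diff. A minimal usage sketch; region and bucket name are hypothetical:

```go
package main

import (
	"log"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/s3"
)

func main() {
	sess := session.New(&aws.Config{Region: aws.String("us-east-1")})
	svc := s3.New(sess)

	// Polls HeadBucket until the bucket exists or the waiter gives up.
	err := svc.WaitUntilBucketExists(&s3.HeadBucketInput{
		Bucket: aws.String("my-example-bucket"), // hypothetical bucket
	})
	if err != nil {
		log.Fatalf("bucket never appeared: %v", err)
	}
}
```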

File diff suppressed because it is too large

View File

@@ -17,7 +17,7 @@ import (
// This guide provides descriptions of the STS API. For more detailed information
// about using this service, go to Temporary Security Credentials (http://docs.aws.amazon.com/IAM/latest/UserGuide/id_credentials_temp.html).
//
// As an alternative to using the API, you can use one of the AWS SDKs, which
// consist of libraries and sample code for various programming languages and
// platforms (Java, Ruby, .NET, iOS, Android, etc.). The SDKs provide a convenient
// way to create programmatic access to STS. For example, the SDKs take care
@@ -25,7 +25,7 @@ import (
// automatically. For information about the AWS SDKs, including how to download
// and install them, see the Tools for Amazon Web Services page (http://aws.amazon.com/tools/).
//
// For information about setting up signatures and authorization through the
// API, go to Signing AWS API Requests (http://docs.aws.amazon.com/general/latest/gr/signing_aws_api_requests.html)
// in the AWS General Reference. For general information about the Query API,
// go to Making Query Requests (http://docs.aws.amazon.com/IAM/latest/UserGuide/IAM_UsingQueryAPI.html)
@@ -37,7 +37,7 @@ import (
// AWS product, you can find the product's technical documentation at http://aws.amazon.com/documentation/
// (http://aws.amazon.com/documentation/).
//
// Endpoints
//
// The AWS Security Token Service (STS) has a default endpoint of https://sts.amazonaws.com
// that maps to the US East (N. Virginia) region. Additional regions are available
@@ -48,7 +48,7 @@ import (
// For information about STS endpoints, see Regions and Endpoints (http://docs.aws.amazon.com/general/latest/gr/rande.html#sts_region)
// in the AWS General Reference.
//
// Recording API requests
//
// STS supports AWS CloudTrail, which is a service that records AWS calls for
// your AWS account and delivers log files to an Amazon S3 bucket. By using

View File

@@ -14,16 +14,18 @@ func Ask(prompt string) (password string, err error) {
return FAsk(os.Stdout, prompt)
}
// Same as the Ask function, except it is possible to specify the file to write
// the prompt to.
// FAsk is the same as Ask, except it is possible to specify the file to write
// the prompt to. If 'nil' is passed as the writer, no prompt will be written.
func FAsk(wr io.Writer, prompt string) (password string, err error) {
if prompt != "" {
if wr != nil && prompt != "" {
fmt.Fprint(wr, prompt) // Display the prompt.
}
password, err = getPassword()
// Carriage return after the user input.
fmt.Fprintln(wr, "")
if wr != nil {
fmt.Fprintln(wr, "")
}
return
}
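The nil-writer guard means FAsk can now read a password with no prompt at all rather than panicking on a write to a nil writer. A short sketch:

```go
package main

import (
	"fmt"

	"github.com/bgentry/speakeasy"
)

func main() {
	// Prompt on stdout, read the password with echo disabled.
	password, err := speakeasy.Ask("Password: ")
	if err != nil {
		panic(err)
	}

	// With a nil writer, FAsk reads silently; the prompt is never written.
	confirm, err := speakeasy.FAsk(nil, "this prompt is suppressed")
	if err != nil {
		panic(err)
	}
	fmt.Println(len(password), len(confirm))
}
```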

View File

@@ -24,17 +24,37 @@ import (
func main() {
log.Println("Configuring cgm")
logger := log.New(os.Stdout, "", log.LstdFlags)
logger.Println("Configuring cgm")
cmc := &cgm.Config{}
// Interval at which metrics are submitted to Circonus, default: 10 seconds
cmc.Interval = "10s" // 10 seconds
// cmc.Interval = "10s" // 10 seconds
// Enable debug messages, default: false
cmc.Debug = false
cmc.Debug = true
// Send debug messages to specific log.Logger instance
// default: if debug stderr, else, discard
//cmc.CheckManager.Log = ...
cmc.Log = logger
// Reset counter metrics after each submission, default: "true"
// Change to "false" to retain (and continue submitting) the last value.
// cmc.ResetCounters = "true"
// Reset gauge metrics after each submission, default: "true"
// Change to "false" to retain (and continue submitting) the last value.
// cmc.ResetGauges = "true"
// Reset histogram metrics after each submission, default: "true"
// Change to "false" to retain (and continue submitting) the last value.
// cmc.ResetHistograms = "true"
// Reset text metrics after each submission, default: "true"
// Change to "false" to retain (and continue submitting) the last value.
// cmc.ResetText = "true"
// Circonus API configuration options
//
@@ -53,10 +73,12 @@ func main() {
// otherwise: if an applicable check is NOT specified or found, an
// attempt will be made to automatically create one
//
// Pre-existing httptrap check submission_url
// Submission URL for an existing [httptrap] check
cmc.CheckManager.Check.SubmissionURL = os.Getenv("CIRCONUS_SUBMISION_URL")
// Pre-existing httptrap check id (check not check bundle)
cmc.CheckManager.Check.ID = ""
// ID of an existing [httptrap] check (note: check id not check bundle id)
cmc.CheckManager.Check.ID = os.Getenv("CIRCONUS_CHECK_ID")
// if neither a submission url nor check id is provided, an attempt will be made to find an existing
// httptrap check by using the circonus api to search for a check matching the following criteria:
// an active check,
@@ -68,50 +90,63 @@ func main() {
// default: 'hostname':'program name'
// note: for an instance that is ephemeral or transient where metric continuity is
// desired, set this explicitly so that the current hostname will not be used.
cmc.CheckManager.Check.InstanceID = ""
// Search tag - a specific tag which, when coupled with the instanceId serves to identify the
// origin and/or grouping of the metrics. Multiple tags may be used, separate with comma.
// (e.g. service:consul,service_role:server)
// default: service:application name (e.g. service:consul)
cmc.CheckManager.Check.SearchTag = ""
// cmc.CheckManager.Check.InstanceID = ""
// Search tag - specific tag(s) used in conjunction with instanceId to search for an
// existing check. comma separated string of tags (spaces will be removed, no commas
// in tag elements).
// default: service:application name (e.g. service:consul service:nomad etc.)
// cmc.CheckManager.Check.SearchTag = ""
// Check secret, default: generated when a check needs to be created
cmc.CheckManager.Check.Secret = ""
// Check tags, array of strings, additional tags to add to a new check, default: none
//cmc.CheckManager.Check.Tags = []string{"category:tagname"}
// cmc.CheckManager.Check.Secret = ""
// Additional tag(s) to add when *creating* a check. comma separated string
// of tags (spaces will be removed, no commas in tag elements).
// (e.g. group:abc or service_role:agent,group:xyz).
// default: none
// cmc.CheckManager.Check.Tags = ""
// max amount of time to hold on to a submission url
// when a given submission fails (due to retries). if the
// time the url was last updated is greater than this, the trap
// url will be refreshed (e.g. if the broker is changed
// in the UI). default: 5 minutes
cmc.CheckManager.Check.MaxURLAge = "5m"
// cmc.CheckManager.Check.MaxURLAge = "5m"
// custom display name for check, default: "InstanceId /cgm"
cmc.CheckManager.Check.DisplayName = ""
// cmc.CheckManager.Check.DisplayName = ""
// force metric activation - if a metric has been disabled via the UI
// the default behavior is to *not* re-activate the metric; this setting
// overrides the behavior and will re-activate the metric when it is
// encountered. "(true|false)", default "false"
cmc.CheckManager.Check.ForceMetricActivation = "false"
// cmc.CheckManager.Check.ForceMetricActivation = "false"
// Broker configuration options
//
// Broker ID of specific broker to use, default: random enterprise broker or
// Circonus default if no enterprise brokers are available.
// default: only used if set
cmc.CheckManager.Broker.ID = ""
// used to select a broker with the same tag (e.g. can be used to dictate that a broker
// serving a specific location should be used. "dc:sfo", "location:new_york", "zone:us-west")
// if more than one broker has the tag, one will be selected randomly from the resulting list
// default: not used unless != ""
cmc.CheckManager.Broker.SelectTag = ""
// cmc.CheckManager.Broker.ID = ""
// used to select a broker with the same tag(s) (e.g. can be used to dictate that a broker
// serving a specific location should be used. "dc:sfo", "loc:nyc,dc:nyc01", "zone:us-west")
// if more than one broker has the tag(s), one will be selected randomly from the resulting
// list. comma separated string of tags (spaces will be removed, no commas in tag elements).
// default: none
// cmc.CheckManager.Broker.SelectTag = ""
// longest time to wait for a broker connection (if latency is > the broker will
// be considered invalid and not available for selection.), default: 500 milliseconds
cmc.CheckManager.Broker.MaxResponseTime = "500ms"
// if broker Id or SelectTag are not specified, a broker will be selected randomly
// cmc.CheckManager.Broker.MaxResponseTime = "500ms"
// note: if broker Id or SelectTag are not specified, a broker will be selected randomly
// from the list of brokers available to the api token. enterprise brokers take precedence
// viable brokers are "active", have the "httptrap" module enabled, are reachable and respond
// within MaxResponseTime.
log.Println("Creating new cgm instance")
logger.Println("Creating new cgm instance")
metrics, err := cgm.NewCirconusMetrics(cmc)
if err != nil {
@@ -121,23 +156,44 @@ func main() {
src := rand.NewSource(time.Now().UnixNano())
rnd := rand.New(src)
log.Println("Starting cgm internal auto-flush timer")
logger.Println("Starting cgm internal auto-flush timer")
metrics.Start()
log.Println("Starting to send metrics")
logger.Println("Adding ctrl-c trap")
c := make(chan os.Signal, 2)
signal.Notify(c, os.Interrupt, syscall.SIGTERM)
go func() {
<-c
logger.Println("Received CTRL-C, flushing outstanding metrics before exit")
metrics.Flush()
os.Exit(0)
}()
// number of "sets" of metrics to send (a minute worth)
// Add metric tags (append to any existing tags on specified metric)
metrics.AddMetricTags("foo", []string{"cgm:test"})
metrics.AddMetricTags("baz", []string{"cgm:test"})
logger.Println("Starting to send metrics")
// number of "sets" of metrics to send
max := 60
for i := 1; i < max; i++ {
log.Printf("\tmetric set %d of %d", i, 60)
metrics.Timing("ding", rnd.Float64()*10)
metrics.Increment("dong")
metrics.Gauge("dang", 10)
time.Sleep(1000 * time.Millisecond)
logger.Printf("\tmetric set %d of %d", i, 60)
metrics.Timing("foo", rnd.Float64()*10)
metrics.Increment("bar")
metrics.Gauge("baz", 10)
if i == 35 {
// Set metric tags (overwrite current tags on specified metric)
metrics.SetMetricTags("baz", []string{"cgm:reset_test", "cgm:test2"})
}
time.Sleep(time.Second)
}
log.Println("Flushing any outstanding metrics manually")
logger.Println("Flushing any outstanding metrics manually")
metrics.Flush()
}

View File

@@ -24,9 +24,9 @@ const (
// a few sensible defaults
defaultAPIURL = "https://api.circonus.com/v2"
defaultAPIApp = "circonus-gometrics"
minRetryWait = 10 * time.Millisecond
maxRetryWait = 50 * time.Millisecond
maxRetries = 3
minRetryWait = 1 * time.Second
maxRetryWait = 15 * time.Second
maxRetries = 4 // equating to 1 + maxRetries total attempts
)
// TokenKeyType - Circonus API Token key
@@ -47,8 +47,11 @@ type URLType string
// SearchQueryType search query
type SearchQueryType string
// SearchTagType search/select tag type
type SearchTagType string
// SearchFilterType search filter
type SearchFilterType string
// TagType search/select/custom tag(s) type
type TagType []string
// Config options for Circonus API
type Config struct {
@@ -103,12 +106,13 @@ func NewAPI(ac *Config) (*API, error) {
a := &API{apiURL, key, app, ac.Debug, ac.Log}
a.Debug = ac.Debug
a.Log = ac.Log
if a.Debug && a.Log == nil {
a.Log = log.New(os.Stderr, "", log.LstdFlags)
}
if a.Log == nil {
if a.Debug {
a.Log = log.New(os.Stderr, "", log.LstdFlags)
} else {
a.Log = log.New(ioutil.Discard, "", log.LstdFlags)
}
a.Log = log.New(ioutil.Discard, "", log.LstdFlags)
}
return a, nil
@@ -167,7 +171,8 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er
// the server time to recover, as 500's are typically not permanent
// errors and may relate to outages on the server side. This will catch
// invalid response codes as well, like 0 and 999.
if resp.StatusCode == 0 || resp.StatusCode >= 500 {
// Retry on 429 (rate limit) as well.
if resp.StatusCode == 0 || resp.StatusCode >= 500 || resp.StatusCode == 429 {
body, readErr := ioutil.ReadAll(resp.Body)
if readErr != nil {
lastHTTPError = fmt.Errorf("- last HTTP error: %d %+v", resp.StatusCode, readErr)
@@ -183,21 +188,28 @@ func (a *API) apiCall(reqMethod string, reqPath string, data []byte) ([]byte, er
client.RetryWaitMin = minRetryWait
client.RetryWaitMax = maxRetryWait
client.RetryMax = maxRetries
client.Logger = a.Log
// retryablehttp only groks log or no log
// but, outputs everything as [DEBUG] messages
if a.Debug {
client.Logger = a.Log
} else {
client.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
}
client.CheckRetry = retryPolicy
resp, err := client.Do(req)
if err != nil {
if lastHTTPError != nil {
return nil, fmt.Errorf("[ERROR] fetching: %+v %+v", err, lastHTTPError)
return nil, lastHTTPError
}
return nil, fmt.Errorf("[ERROR] fetching %s: %+v", reqURL, err)
return nil, fmt.Errorf("[ERROR] %s: %+v", reqURL, err)
}
defer resp.Body.Close()
body, err := ioutil.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("[ERROR] reading body %+v", err)
return nil, fmt.Errorf("[ERROR] reading response %+v", err)
}
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
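Taken with the constants above (1s–15s backoff, 4 retries), the retry rule becomes: retry on transport errors, on 5xx (which also covers the 0 and 999 pseudo-codes), and now on 429. A sketch of a CheckRetry function implementing that rule, assuming the context-free signature this vintage of go-retryablehttp used:

```go
package main

import (
	"fmt"
	"net/http"
)

// retryPolicy retries transport errors, 5xx responses, and 429 rate limits.
func retryPolicy(resp *http.Response, err error) (bool, error) {
	if err != nil {
		return true, err
	}
	if resp.StatusCode == 0 || resp.StatusCode >= 500 || resp.StatusCode == 429 {
		return true, nil
	}
	return false, nil
}

func main() {
	for _, code := range []int{200, 429, 503} {
		retry, _ := retryPolicy(&http.Response{StatusCode: code}, nil)
		fmt.Println(code, retry) // 200 false, 429 true, 503 true
	}
}
```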

View File

@@ -7,6 +7,7 @@ package api
import (
"encoding/json"
"fmt"
"strings"
)
// BrokerDetail instance attributes
@@ -55,8 +56,8 @@ func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) {
}
// FetchBrokerListByTag returns a list of brokers with the specified tag(s)
func (a *API) FetchBrokerListByTag(searchTag SearchTagType) ([]Broker, error) {
query := SearchQueryType(fmt.Sprintf("f__tags_has=%s", searchTag))
func (a *API) FetchBrokerListByTag(searchTag TagType) ([]Broker, error) {
query := SearchQueryType(fmt.Sprintf("f__tags_has=%s", strings.Replace(strings.Join(searchTag, ","), ",", "&f__tags_has=", -1)))
return a.BrokerSearch(query)
}
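The joined-and-replaced tag list expands into one f__tags_has parameter per tag. A standalone sketch of the expansion:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	searchTag := []string{"dc:sfo", "service:consul"}
	query := fmt.Sprintf("f__tags_has=%s",
		strings.Replace(strings.Join(searchTag, ","), ",", "&f__tags_has=", -1))
	fmt.Println(query)
	// Output: f__tags_has=dc:sfo&f__tags_has=service:consul
}
```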

View File

@@ -69,9 +69,9 @@ func (a *API) FetchCheckBySubmissionURL(submissionURL URLType) (*Check, error) {
}
uuid := pathParts[0]
query := SearchQueryType(fmt.Sprintf("f__check_uuid=%s", uuid))
filter := SearchFilterType(fmt.Sprintf("f__check_uuid=%s", uuid))
checks, err := a.CheckSearch(query)
checks, err := a.CheckFilterSearch(filter)
if err != nil {
return nil, err
}
@@ -98,9 +98,9 @@ func (a *API) FetchCheckBySubmissionURL(submissionURL URLType) (*Check, error) {
}
// CheckSearch returns a list of checks matching a query/filter
// CheckSearch returns a list of checks matching a search query
func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) {
queryURL := fmt.Sprintf("/check?%s", string(query))
queryURL := fmt.Sprintf("/check?search=%s", string(query))
result, err := a.Get(queryURL)
if err != nil {
@@ -114,3 +114,20 @@ func (a *API) CheckSearch(query SearchQueryType) ([]Check, error) {
return checks, nil
}
// CheckFilterSearch returns a list of checks matching a filter
func (a *API) CheckFilterSearch(filter SearchFilterType) ([]Check, error) {
filterURL := fmt.Sprintf("/check?%s", string(filter))
result, err := a.Get(filterURL)
if err != nil {
return nil, err
}
var checks []Check
if err := json.Unmarshal(result, &checks); err != nil {
return nil, err
}
return checks, nil
}
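The distinction between the two entry points: CheckSearch now prepends search= to its argument, while CheckFilterSearch passes the filter string through verbatim. A sketch of the resulting request paths (the UUID is hypothetical):

```go
package main

import "fmt"

func main() {
	query := `(active:1)(type:"httptrap")`
	filter := "f__check_uuid=abc-123" // hypothetical UUID

	fmt.Printf("/check?search=%s\n", query) // what CheckSearch requests
	fmt.Printf("/check?%s\n", filter)       // what CheckFilterSearch requests
}
```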

View File

@@ -25,10 +25,11 @@ type CheckBundleConfig struct {
// CheckBundleMetric individual metric configuration
type CheckBundleMetric struct {
Name string `json:"name"`
Type string `json:"type"`
Units string `json:"units"`
Status string `json:"status"`
Name string `json:"name"`
Type string `json:"type"`
Units string `json:"units"`
Status string `json:"status"`
Tags []string `json:"tags"`
}
// CheckBundle definition
@@ -116,7 +117,7 @@ func (a *API) CreateCheckBundle(config CheckBundle) (*CheckBundle, error) {
// UpdateCheckBundle updates a check bundle configuration
func (a *API) UpdateCheckBundle(config *CheckBundle) (*CheckBundle, error) {
if a.Debug {
a.Log.Printf("[DEBUG] Updating check bundle with new metrics.")
a.Log.Printf("[DEBUG] Updating check bundle.")
}
cfgJSON, err := json.Marshal(config)

View File

@@ -79,7 +79,7 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) {
var brokerList []api.Broker
var err error
if cm.brokerSelectTag != "" {
if len(cm.brokerSelectTag) > 0 {
brokerList, err = cm.apih.FetchBrokerListByTag(cm.brokerSelectTag)
if err != nil {
return nil, err

View File

@@ -17,6 +17,50 @@ import (
"github.com/circonus-labs/circonus-gometrics/api"
)
// UpdateCheck determines if the check needs to be updated (new metrics, tags, etc.)
func (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric) {
// only if check manager is enabled
if !cm.enabled {
return
}
// only if checkBundle has been populated
if cm.checkBundle == nil {
return
}
cm.addNewMetrics(newMetrics)
if len(cm.metricTags) > 0 {
for metricName, metricTags := range cm.metricTags {
// note: if a tag has been added (queued) for a metric which never gets sent
// the tags will be discarded. (setting tags does not *create* metrics.)
cm.AddMetricTags(metricName, metricTags, false)
cm.mtmu.Lock()
delete(cm.metricTags, metricName)
cm.mtmu.Unlock()
}
}
if cm.forceCheckUpdate {
newCheckBundle, err := cm.apih.UpdateCheckBundle(cm.checkBundle)
if err != nil {
cm.Log.Printf("[ERROR] updating check bundle %v", err)
return
}
cm.forceCheckUpdate = false
cm.cbmu.Lock()
cm.checkBundle = newCheckBundle
cm.inventoryMetrics()
cm.cbmu.Unlock()
}
}
// Initialize CirconusMetrics instance. Attempt to find a check otherwise create one.
// use cases:
//
@@ -86,7 +130,7 @@ func (cm *CheckManager) initializeTrapURL() error {
} else {
searchCriteria := fmt.Sprintf(
"(active:1)(host:\"%s\")(type:\"%s\")(tags:%s)",
cm.checkInstanceID, cm.checkType, cm.checkSearchTag)
cm.checkInstanceID, cm.checkType, strings.Join(cm.checkSearchTag, ","))
checkBundle, err = cm.checkBundleSearch(searchCriteria)
if err != nil {
return err
@@ -204,7 +248,7 @@ func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error)
Notes: "",
Period: 60,
Status: statusActive,
Tags: append([]string{string(cm.checkSearchTag)}, cm.checkTags...),
Tags: append(cm.checkSearchTag, cm.checkTags...),
Target: string(cm.checkInstanceID),
Timeout: 10,
Type: string(cm.checkType),

View File

@@ -16,6 +16,7 @@ import (
"os"
"path"
"strconv"
"strings"
"sync"
"time"
@@ -58,7 +59,7 @@ type CheckConfig struct {
// used to search for a check to use
// used as check.target when creating a check
InstanceID string
// unique check searching tag
// unique check searching tag (or tags)
// used to search for a check to use (combined with instanceid)
// used as a regular tag when creating a check
SearchTag string
@@ -68,7 +69,7 @@ type CheckConfig struct {
Secret string
// additional tags to add to a check (when creating a check)
// these tags will not be added to an existing check
Tags []string
Tags string
// max amount of time to hold on to a submission url
// when a given submission fails (due to retries). if the
// time the url was last updated is greater than this, the trap
@@ -87,8 +88,8 @@ type CheckConfig struct {
type BrokerConfig struct {
// a specific broker id (numeric portion of cid)
ID string
// a tag that can be used to select 1-n brokers from which to select
// when creating a new check (e.g. datacenter:abc)
// one or more tags used to select 1-n brokers from which to choose
// when creating a new check (e.g. datacenter:abc or loc:dfw,dc:abc)
SelectTag string
// for a broker to be considered viable it must respond to a
// connection attempt within this amount of time e.g. 200ms, 2s, 1m
@@ -118,7 +119,7 @@ type CheckInstanceIDType string
type CheckSecretType string
// CheckTagsType check tags
type CheckTagsType []string
type CheckTagsType string
// CheckDisplayNameType check display name
type CheckDisplayNameType string
@@ -137,20 +138,26 @@ type CheckManager struct {
checkType CheckTypeType
checkID api.IDType
checkInstanceID CheckInstanceIDType
checkSearchTag api.SearchTagType
checkSearchTag api.TagType
checkSecret CheckSecretType
checkTags CheckTagsType
checkTags api.TagType
checkSubmissionURL api.URLType
checkDisplayName CheckDisplayNameType
forceMetricActivation bool
forceCheckUpdate bool
// metric tags
metricTags map[string][]string
mtmu sync.Mutex
// broker
brokerID api.IDType
brokerSelectTag api.SearchTagType
brokerSelectTag api.TagType
brokerMaxResponseTime time.Duration
// state
checkBundle *api.CheckBundle
cbmu sync.Mutex
availableMetrics map[string]bool
trapURL api.URLType
trapCN BrokerCNType
@@ -178,14 +185,12 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
}
cm.Debug = cfg.Debug
cm.Log = cfg.Log
if cm.Debug && cm.Log == nil {
cm.Log = log.New(os.Stderr, "", log.LstdFlags)
}
if cm.Log == nil {
if cm.Debug {
cm.Log = log.New(os.Stderr, "", log.LstdFlags)
} else {
cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
}
cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
}
if cfg.Check.SubmissionURL != "" {
@@ -233,9 +238,7 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
cm.checkInstanceID = CheckInstanceIDType(cfg.Check.InstanceID)
cm.checkDisplayName = CheckDisplayNameType(cfg.Check.DisplayName)
cm.checkSearchTag = api.SearchTagType(cfg.Check.SearchTag)
cm.checkSecret = CheckSecretType(cfg.Check.Secret)
cm.checkTags = cfg.Check.Tags
fma := defaultForceMetricActivation
if cfg.Check.ForceMetricActivation != "" {
@@ -256,8 +259,14 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
cm.checkInstanceID = CheckInstanceIDType(fmt.Sprintf("%s:%s", hn, an))
}
if cm.checkSearchTag == "" {
cm.checkSearchTag = api.SearchTagType(fmt.Sprintf("service:%s", an))
if cfg.Check.SearchTag == "" {
cm.checkSearchTag = []string{fmt.Sprintf("service:%s", an)}
} else {
cm.checkSearchTag = strings.Split(strings.Replace(cfg.Check.SearchTag, " ", "", -1), ",")
}
if cfg.Check.Tags != "" {
cm.checkTags = strings.Split(strings.Replace(cfg.Check.Tags, " ", "", -1), ",")
}
if cm.checkDisplayName == "" {
@@ -286,7 +295,9 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
}
cm.brokerID = api.IDType(id)
cm.brokerSelectTag = api.SearchTagType(cfg.Broker.SelectTag)
if cfg.Broker.SelectTag != "" {
cm.brokerSelectTag = strings.Split(strings.Replace(cfg.Broker.SelectTag, " ", "", -1), ",")
}
dur = cfg.Broker.MaxResponseTime
if dur == "" {
@@ -300,6 +311,7 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
// metrics
cm.availableMetrics = make(map[string]bool)
cm.metricTags = make(map[string][]string)
if err := cm.initializeTrapURL(); err != nil {
return nil, err

View File

@@ -29,44 +29,106 @@ func (cm *CheckManager) ActivateMetric(name string) bool {
return false
}
// AddNewMetrics updates a check bundle with new metrics
func (cm *CheckManager) AddNewMetrics(newMetrics map[string]*api.CheckBundleMetric) {
// only if check manager is enabled
if !cm.enabled {
return
// AddMetricTags updates check bundle metrics with tags
func (cm *CheckManager) AddMetricTags(metricName string, tags []string, appendTags bool) bool {
tagsUpdated := false
if len(tags) == 0 {
return tagsUpdated
}
// only if checkBundle has been populated
if cm.checkBundle == nil {
return
metricFound := false
for metricIdx, metric := range cm.checkBundle.Metrics {
if metric.Name == metricName {
metricFound = true
numNewTags := countNewTags(metric.Tags, tags)
if numNewTags == 0 {
if appendTags {
break // no new tags to add
} else if len(metric.Tags) == len(tags) {
break // no new tags and old/new same length
}
}
if appendTags {
metric.Tags = append(metric.Tags, tags...)
} else {
metric.Tags = tags
}
cm.cbmu.Lock()
cm.checkBundle.Metrics[metricIdx] = metric
cm.cbmu.Unlock()
tagsUpdated = true
}
}
newCheckBundle := cm.checkBundle
numCurrMetrics := len(newCheckBundle.Metrics)
if tagsUpdated {
if cm.Debug {
action := "Set"
if appendTags {
action = "Added"
}
cm.Log.Printf("[DEBUG] %s metric tag(s) %s %v\n", action, metricName, tags)
}
cm.cbmu.Lock()
cm.forceCheckUpdate = true
cm.cbmu.Unlock()
} else {
if !metricFound {
if _, exists := cm.metricTags[metricName]; !exists {
if cm.Debug {
cm.Log.Printf("[DEBUG] Queing metric tag(s) %s %v\n", metricName, tags)
}
// queue the tags, the metric is new (e.g. not in the check yet)
cm.mtmu.Lock()
cm.metricTags[metricName] = append(cm.metricTags[metricName], tags...)
cm.mtmu.Unlock()
}
}
}
return tagsUpdated
}
// addNewMetrics updates a check bundle with new metrics
func (cm *CheckManager) addNewMetrics(newMetrics map[string]*api.CheckBundleMetric) bool {
updatedCheckBundle := false
if cm.checkBundle == nil || len(newMetrics) == 0 {
return updatedCheckBundle
}
cm.cbmu.Lock()
numCurrMetrics := len(cm.checkBundle.Metrics)
numNewMetrics := len(newMetrics)
if numCurrMetrics+numNewMetrics >= cap(newCheckBundle.Metrics) {
if numCurrMetrics+numNewMetrics >= cap(cm.checkBundle.Metrics) {
nm := make([]api.CheckBundleMetric, numCurrMetrics+numNewMetrics)
copy(nm, newCheckBundle.Metrics)
newCheckBundle.Metrics = nm
copy(nm, cm.checkBundle.Metrics)
cm.checkBundle.Metrics = nm
}
newCheckBundle.Metrics = newCheckBundle.Metrics[0 : numCurrMetrics+numNewMetrics]
cm.checkBundle.Metrics = cm.checkBundle.Metrics[0 : numCurrMetrics+numNewMetrics]
i := 0
for _, metric := range newMetrics {
newCheckBundle.Metrics[numCurrMetrics+i] = *metric
cm.checkBundle.Metrics[numCurrMetrics+i] = *metric
i++
updatedCheckBundle = true
}
checkBundle, err := cm.apih.UpdateCheckBundle(newCheckBundle)
if err != nil {
cm.Log.Printf("[ERROR] updating check bundle with new metrics %v", err)
return
if updatedCheckBundle {
cm.forceCheckUpdate = true
}
cm.checkBundle = checkBundle
cm.inventoryMetrics()
cm.cbmu.Unlock()
return updatedCheckBundle
}
// inventoryMetrics creates list of active metrics in check bundle
@@ -77,3 +139,31 @@ func (cm *CheckManager) inventoryMetrics() {
}
cm.availableMetrics = availableMetrics
}
// countNewTags returns a count of new tags which do not exist in the current list of tags
func countNewTags(currTags []string, newTags []string) int {
if len(newTags) == 0 {
return 0
}
if len(currTags) == 0 {
return len(newTags)
}
newTagCount := 0
for _, newTag := range newTags {
found := false
for _, currTag := range currTags {
if newTag == currTag {
found = true
break
}
}
if !found {
newTagCount++
}
}
return newTagCount
}
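A quick worked case of the helper above, reproduced standalone:

```go
package main

import "fmt"

// countNewTags, as defined above: counts entries of newTags absent from currTags.
func countNewTags(currTags, newTags []string) int {
	if len(newTags) == 0 {
		return 0
	}
	if len(currTags) == 0 {
		return len(newTags)
	}
	n := 0
	for _, newTag := range newTags {
		found := false
		for _, currTag := range currTags {
			if newTag == currTag {
				found = true
				break
			}
		}
		if !found {
			n++
		}
	}
	return n
}

func main() {
	// "env:prod" is new, "cgm:test" is already present: prints 1.
	fmt.Println(countNewTags([]string{"cgm:test"}, []string{"cgm:test", "env:prod"}))
}
```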

View File

@@ -34,6 +34,7 @@ import (
"io/ioutil"
"log"
"os"
"strconv"
"sync"
"time"
@@ -47,8 +48,12 @@ const (
// Config options for circonus-gometrics
type Config struct {
Log *log.Logger
Debug bool
Log *log.Logger
Debug bool
ResetCounters string // reset/delete counters on flush (default true)
ResetGauges string // reset/delete gauges on flush (default true)
ResetHistograms string // reset/delete histograms on flush (default true)
ResetText string // reset/delete text on flush (default true)
// API, Check and Broker configuration options
CheckManager checkmgr.Config
@@ -59,12 +64,16 @@ type Config struct {
// CirconusMetrics state
type CirconusMetrics struct {
Log *log.Logger
Debug bool
flushInterval time.Duration
flushing bool
flushmu sync.Mutex
check *checkmgr.CheckManager
Log *log.Logger
Debug bool
resetCounters bool
resetGauges bool
resetHistograms bool
resetText bool
flushInterval time.Duration
flushing bool
flushmu sync.Mutex
check *checkmgr.CheckManager
counters map[string]uint64
cm sync.Mutex
@@ -106,12 +115,10 @@ func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
}
cm.Debug = cfg.Debug
if cm.Debug {
if cfg.Log == nil {
cm.Log = log.New(os.Stderr, "", log.LstdFlags)
} else {
cm.Log = cfg.Log
}
cm.Log = cfg.Log
if cm.Debug && cfg.Log == nil {
cm.Log = log.New(os.Stderr, "", log.LstdFlags)
}
if cm.Log == nil {
cm.Log = log.New(ioutil.Discard, "", log.LstdFlags)
@@ -128,6 +135,36 @@ func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
}
cm.flushInterval = dur
var setting bool
cm.resetCounters = true
if cfg.ResetCounters != "" {
if setting, err = strconv.ParseBool(cfg.ResetCounters); err == nil {
cm.resetCounters = setting
}
}
cm.resetGauges = true
if cfg.ResetGauges != "" {
if setting, err = strconv.ParseBool(cfg.ResetGauges); err == nil {
cm.resetGauges = setting
}
}
cm.resetHistograms = true
if cfg.ResetHistograms != "" {
if setting, err = strconv.ParseBool(cfg.ResetHistograms); err == nil {
cm.resetHistograms = setting
}
}
cm.resetText = true
if cfg.ResetText != "" {
if setting, err = strconv.ParseBool(cfg.ResetText); err == nil {
cm.resetText = setting
}
}
cfg.CheckManager.Debug = cm.Debug
cfg.CheckManager.Log = cm.Log
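Each Reset* knob is a string boolean that falls back to its default (true) when empty or unparsable. A hedged sketch of retaining counters and gauges across flushes; the submission URL is hypothetical:

```go
package main

import (
	"log"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

func main() {
	cfg := &cgm.Config{}

	cfg.ResetCounters = "false" // retain (and keep submitting) last counter values
	cfg.ResetGauges = "false"
	// Histograms and text metrics keep the default: reset after each flush.

	// Hypothetical trap URL, so no API token is needed for this sketch.
	cfg.CheckManager.Check.SubmissionURL = "http://127.0.0.1:56104/write/test"

	metrics, err := cgm.NewCirconusMetrics(cfg)
	if err != nil {
		log.Fatal(err)
	}
	metrics.Increment("requests")
	metrics.Flush()
}
```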

View File

@@ -29,19 +29,20 @@ func (m *CirconusMetrics) RecordValue(metric string, val float64) {
// SetHistogramValue adds a value to a histogram
func (m *CirconusMetrics) SetHistogramValue(metric string, val float64) {
m.NewHistogram(metric)
hist := m.NewHistogram(metric)
m.histograms[metric].rw.Lock()
defer m.histograms[metric].rw.Unlock()
m.histograms[metric].hist.RecordValue(val)
m.hm.Lock()
hist.rw.Lock()
hist.hist.RecordValue(val)
hist.rw.Unlock()
m.hm.Unlock()
}
// RemoveHistogram removes a histogram
func (m *CirconusMetrics) RemoveHistogram(metric string) {
m.hm.Lock()
defer m.hm.Unlock()
delete(m.histograms, metric)
m.hm.Unlock()
}
// NewHistogram returns a histogram instance.
@@ -71,7 +72,6 @@ func (h *Histogram) Name() string {
// RecordValue records the given value to a histogram instance
func (h *Histogram) RecordValue(v float64) {
h.rw.Lock()
defer h.rw.Unlock()
h.hist.RecordValue(v)
h.rw.Unlock()
}

View File

@@ -0,0 +1,15 @@
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package circonusgometrics
// SetMetricTags sets the tags for the named metric and flags a check update is needed
func (m *CirconusMetrics) SetMetricTags(name string, tags []string) bool {
return m.check.AddMetricTags(name, tags, false)
}
// AddMetricTags appends tags to any existing tags for the named metric and flags a check update is needed
func (m *CirconusMetrics) AddMetricTags(name string, tags []string) bool {
return m.check.AddMetricTags(name, tags, true)
}
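A hedged sketch of the append-versus-overwrite semantics; the submission URL and metric names are hypothetical:

```go
package main

import (
	"log"

	cgm "github.com/circonus-labs/circonus-gometrics"
)

func main() {
	cfg := &cgm.Config{}
	cfg.CheckManager.Check.SubmissionURL = "http://127.0.0.1:56104/write/test" // hypothetical

	m, err := cgm.NewCirconusMetrics(cfg)
	if err != nil {
		log.Fatal(err)
	}

	m.Gauge("api_latency", 10)

	// Append: "env:prod" joins whatever tags "api_latency" already carries.
	m.AddMetricTags("api_latency", []string{"env:prod"})

	// Overwrite: the metric now carries exactly these two tags.
	m.SetMetricTags("api_latency", []string{"env:prod", "service:web"})

	m.Flush()
}
```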

View File

@@ -21,9 +21,9 @@ import (
)
func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[string]*api.CheckBundleMetric) {
if len(newMetrics) > 0 {
m.check.AddNewMetrics(newMetrics)
}
// update check if there are any new metrics or if metric tags have been added since the last submit
m.check.UpdateCheck(newMetrics)
str, err := json.Marshal(output)
if err != nil {
@@ -107,10 +107,16 @@ func (m *CirconusMetrics) trapCall(payload []byte) (int, error) {
DisableCompression: true,
}
}
client.RetryWaitMin = 10 * time.Millisecond
client.RetryWaitMax = 50 * time.Millisecond
client.RetryWaitMin = 1 * time.Second
client.RetryWaitMax = 5 * time.Second
client.RetryMax = 3
client.Logger = m.Log
// retryablehttp only groks log or no log
// but, outputs everything as [DEBUG] messages
if m.Debug {
client.Logger = m.Log
} else {
client.Logger = log.New(ioutil.Discard, "", log.LstdFlags)
}
client.CheckRetry = retryPolicy
attempts := -1

View File

@@ -84,7 +84,9 @@ func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string,
h = make(map[string]*circonusllhist.Histogram, len(m.histograms))
for n, hist := range m.histograms {
hist.rw.Lock()
h[n] = hist.hist.CopyAndReset()
hist.rw.Unlock()
}
t = make(map[string]string, len(m.text)+len(m.textFuncs))
@@ -96,5 +98,24 @@ func (m *CirconusMetrics) snapshot() (c map[string]uint64, g map[string]string,
t[n] = f()
}
if m.resetCounters {
m.counters = make(map[string]uint64)
m.counterFuncs = make(map[string]func() uint64)
}
if m.resetGauges {
m.gauges = make(map[string]string)
m.gaugeFuncs = make(map[string]func() int64)
}
if m.resetHistograms {
m.histograms = make(map[string]*Histogram)
}
if m.resetText {
m.text = make(map[string]string)
m.textFuncs = make(map[string]func() string)
}
return
}

View File

@@ -1,176 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

View File

@@ -1,59 +0,0 @@
[![Build Status](https://travis-ci.org/cloudfoundry-incubator/candiedyaml.svg)](https://travis-ci.org/cloudfoundry-incubator/candiedyaml)
[![GoDoc](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml?status.svg)](https://godoc.org/github.com/cloudfoundry-incubator/candiedyaml)
candiedyaml
===========
YAML for Go
A YAML 1.1 parser with support for YAML 1.2 features
Usage
-----
```go
package myApp
import (
"github.com/cloudfoundry-incubator/candiedyaml"
"fmt"
"os"
)
func main() {
file, err := os.Open("path/to/some/file.yml")
if err != nil {
println("File does not exist:", err.Error())
os.Exit(1)
}
defer file.Close()
document := new(interface{})
decoder := candiedyaml.NewDecoder(file)
err = decoder.Decode(document)
if err != nil {
println("Failed to decode document:", err.Error())
}
println("parsed yml into interface:", fmt.Sprintf("%#v", document))
fileToWrite, err := os.Create("path/to/some/new/file.yml")
if err != nil {
println("Failed to open file for writing:", err.Error())
os.Exit(1)
}
defer fileToWrite.Close()
encoder := candiedyaml.NewEncoder(fileToWrite)
err = encoder.Encode(document)
if err != nil {
println("Failed to encode document:", err.Error())
os.Exit(1)
}
return
}
```

View File

@@ -1,834 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io"
)
/*
* Create a new parser object.
*/
func yaml_parser_initialize(parser *yaml_parser_t) bool {
*parser = yaml_parser_t{
raw_buffer: make([]byte, 0, INPUT_RAW_BUFFER_SIZE),
buffer: make([]byte, 0, INPUT_BUFFER_SIZE),
}
return true
}
/*
* Destroy a parser object.
*/
func yaml_parser_delete(parser *yaml_parser_t) {
*parser = yaml_parser_t{}
}
/*
* String read handler.
*/
func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
if parser.input_pos == len(parser.input) {
return 0, io.EOF
}
n := copy(buffer, parser.input[parser.input_pos:])
parser.input_pos += n
return n, nil
}
/*
* File read handler.
*/
func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (int, error) {
return parser.input_reader.Read(buffer)
}
/*
* Set a string input.
*/
func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
if parser.read_handler != nil {
panic("input already set")
}
parser.read_handler = yaml_string_read_handler
parser.input = input
parser.input_pos = 0
}
/*
* Set a reader input
*/
func yaml_parser_set_input_reader(parser *yaml_parser_t, reader io.Reader) {
if parser.read_handler != nil {
panic("input already set")
}
parser.read_handler = yaml_file_read_handler
parser.input_reader = reader
}
/*
* Set a generic input.
*/
func yaml_parser_set_input(parser *yaml_parser_t, handler yaml_read_handler_t) {
if parser.read_handler != nil {
panic("input already set")
}
parser.read_handler = handler
}
/*
* Set the source encoding.
*/
func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
if parser.encoding != yaml_ANY_ENCODING {
panic("encoding already set")
}
parser.encoding = encoding
}
/*
* Create a new emitter object.
*/
func yaml_emitter_initialize(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{
buffer: make([]byte, OUTPUT_BUFFER_SIZE),
raw_buffer: make([]byte, 0, OUTPUT_RAW_BUFFER_SIZE),
states: make([]yaml_emitter_state_t, 0, INITIAL_STACK_SIZE),
events: make([]yaml_event_t, 0, INITIAL_QUEUE_SIZE),
}
}
func yaml_emitter_delete(emitter *yaml_emitter_t) {
*emitter = yaml_emitter_t{}
}
/*
* String write handler.
*/
func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
*emitter.output_buffer = append(*emitter.output_buffer, buffer...)
return nil
}
/*
* File write handler.
*/
func yaml_writer_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
_, err := emitter.output_writer.Write(buffer)
return err
}
/*
* Set a string output.
*/
func yaml_emitter_set_output_string(emitter *yaml_emitter_t, buffer *[]byte) {
if emitter.write_handler != nil {
panic("output already set")
}
emitter.write_handler = yaml_string_write_handler
emitter.output_buffer = buffer
}
/*
* Set a file output.
*/
func yaml_emitter_set_output_writer(emitter *yaml_emitter_t, w io.Writer) {
if emitter.write_handler != nil {
panic("output already set")
}
emitter.write_handler = yaml_writer_write_handler
emitter.output_writer = w
}
/*
* Set a generic output handler.
*/
func yaml_emitter_set_output(emitter *yaml_emitter_t, handler yaml_write_handler_t) {
if emitter.write_handler != nil {
panic("output already set")
}
emitter.write_handler = handler
}
/*
* Set the output encoding.
*/
func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
if emitter.encoding != yaml_ANY_ENCODING {
panic("encoding already set")
}
emitter.encoding = encoding
}
/*
* Set the canonical output style.
*/
func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
emitter.canonical = canonical
}
/*
* Set the indentation increment.
*/
func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
if indent < 2 || indent > 9 {
indent = 2
}
emitter.best_indent = indent
}
/*
* Set the preferred line width.
*/
func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
if width < 0 {
width = -1
}
emitter.best_width = width
}
/*
* Set if unescaped non-ASCII characters are allowed.
*/
func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
emitter.unicode = unicode
}
/*
* Set the preferred line break character.
*/
func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
emitter.line_break = line_break
}
/*
* Destroy a token object.
*/
// yaml_DECLARE(void)
// yaml_token_delete(yaml_token_t *token)
// {
// assert(token); /* Non-NULL token object expected. */
//
// switch (token.type)
// {
// case yaml_TAG_DIRECTIVE_TOKEN:
// yaml_free(token.data.tag_directive.handle);
// yaml_free(token.data.tag_directive.prefix);
// break;
//
// case yaml_ALIAS_TOKEN:
// yaml_free(token.data.alias.value);
// break;
//
// case yaml_ANCHOR_TOKEN:
// yaml_free(token.data.anchor.value);
// break;
//
// case yaml_TAG_TOKEN:
// yaml_free(token.data.tag.handle);
// yaml_free(token.data.tag.suffix);
// break;
//
// case yaml_SCALAR_TOKEN:
// yaml_free(token.data.scalar.value);
// break;
//
// default:
// break;
// }
//
// memset(token, 0, sizeof(yaml_token_t));
// }
/*
* Check if a string is a valid UTF-8 sequence.
*
* Check 'reader.c' for more details on UTF-8 encoding.
*/
// static int
// yaml_check_utf8(yaml_char_t *start, size_t length)
// {
// yaml_char_t *end = start+length;
// yaml_char_t *pointer = start;
//
// while (pointer < end) {
// unsigned char octet;
// unsigned int width;
// unsigned int value;
// size_t k;
//
// octet = pointer[0];
// width = (octet & 0x80) == 0x00 ? 1 :
// (octet & 0xE0) == 0xC0 ? 2 :
// (octet & 0xF0) == 0xE0 ? 3 :
// (octet & 0xF8) == 0xF0 ? 4 : 0;
// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
// if (!width) return 0;
// if (pointer+width > end) return 0;
// for (k = 1; k < width; k ++) {
// octet = pointer[k];
// if ((octet & 0xC0) != 0x80) return 0;
// value = (value << 6) + (octet & 0x3F);
// }
// if (!((width == 1) ||
// (width == 2 && value >= 0x80) ||
// (width == 3 && value >= 0x800) ||
// (width == 4 && value >= 0x10000))) return 0;
//
// pointer += width;
// }
//
// return 1;
// }
/*
* Create STREAM-START.
*/
func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) {
*event = yaml_event_t{
event_type: yaml_STREAM_START_EVENT,
encoding: encoding,
}
}
/*
* Create STREAM-END.
*/
func yaml_stream_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
event_type: yaml_STREAM_END_EVENT,
}
}
/*
* Create DOCUMENT-START.
*/
func yaml_document_start_event_initialize(event *yaml_event_t,
version_directive *yaml_version_directive_t,
tag_directives []yaml_tag_directive_t,
implicit bool) {
*event = yaml_event_t{
event_type: yaml_DOCUMENT_START_EVENT,
version_directive: version_directive,
tag_directives: tag_directives,
implicit: implicit,
}
}
/*
* Create DOCUMENT-END.
*/
func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) {
*event = yaml_event_t{
event_type: yaml_DOCUMENT_END_EVENT,
implicit: implicit,
}
}
/*
* Create ALIAS.
*/
func yaml_alias_event_initialize(event *yaml_event_t, anchor []byte) {
*event = yaml_event_t{
event_type: yaml_ALIAS_EVENT,
anchor: anchor,
}
}
/*
* Create SCALAR.
*/
func yaml_scalar_event_initialize(event *yaml_event_t,
anchor []byte, tag []byte,
value []byte,
plain_implicit bool, quoted_implicit bool,
style yaml_scalar_style_t) {
*event = yaml_event_t{
event_type: yaml_SCALAR_EVENT,
anchor: anchor,
tag: tag,
value: value,
implicit: plain_implicit,
quoted_implicit: quoted_implicit,
style: yaml_style_t(style),
}
}
/*
* Create SEQUENCE-START.
*/
func yaml_sequence_start_event_initialize(event *yaml_event_t,
anchor []byte, tag []byte, implicit bool, style yaml_sequence_style_t) {
*event = yaml_event_t{
event_type: yaml_SEQUENCE_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
/*
* Create SEQUENCE-END.
*/
func yaml_sequence_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
event_type: yaml_SEQUENCE_END_EVENT,
}
}
/*
* Create MAPPING-START.
*/
func yaml_mapping_start_event_initialize(event *yaml_event_t,
anchor []byte, tag []byte, implicit bool, style yaml_mapping_style_t) {
*event = yaml_event_t{
event_type: yaml_MAPPING_START_EVENT,
anchor: anchor,
tag: tag,
implicit: implicit,
style: yaml_style_t(style),
}
}
/*
* Create MAPPING-END.
*/
func yaml_mapping_end_event_initialize(event *yaml_event_t) {
*event = yaml_event_t{
event_type: yaml_MAPPING_END_EVENT,
}
}
/*
* Destroy an event object.
*/
func yaml_event_delete(event *yaml_event_t) {
*event = yaml_event_t{}
}
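// Illustrative sketch (not from the original source): a well-formed stream is
// produced by initializing and emitting events in this order:
//
//	STREAM-START, DOCUMENT-START, <content events>, DOCUMENT-END, STREAM-END
//
// e.g. for a document holding the single scalar "hello":
//
//	var ev yaml_event_t
//	yaml_stream_start_event_initialize(&ev, yaml_UTF8_ENCODING) // emit ev
//	yaml_document_start_event_initialize(&ev, nil, nil, true)   // emit ev
//	yaml_scalar_event_initialize(&ev, nil, nil, []byte("hello"),
//		true, true, yaml_PLAIN_SCALAR_STYLE)                    // emit ev
//	yaml_document_end_event_initialize(&ev, true)               // emit ev
//	yaml_stream_end_event_initialize(&ev)                       // emit ev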
// /*
// * Create a document object.
// */
//
// func yaml_document_initialize(document *yaml_document_t,
// version_directive *yaml_version_directive_t,
// tag_directives []yaml_tag_directive_t,
// start_implicit, end_implicit bool) bool {
//
//
// {
// struct {
// YAML_error_type_t error;
// } context;
// struct {
// yaml_node_t *start;
// yaml_node_t *end;
// yaml_node_t *top;
// } nodes = { NULL, NULL, NULL };
// yaml_version_directive_t *version_directive_copy = NULL;
// struct {
// yaml_tag_directive_t *start;
// yaml_tag_directive_t *end;
// yaml_tag_directive_t *top;
// } tag_directives_copy = { NULL, NULL, NULL };
// yaml_tag_directive_t value = { NULL, NULL };
// YAML_mark_t mark = { 0, 0, 0 };
//
// assert(document); /* Non-NULL document object is expected. */
// assert((tag_directives_start && tag_directives_end) ||
// (tag_directives_start == tag_directives_end));
// /* Valid tag directives are expected. */
//
// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error;
//
// if (version_directive) {
// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t));
// if (!version_directive_copy) goto error;
// version_directive_copy.major = version_directive.major;
// version_directive_copy.minor = version_directive.minor;
// }
//
// if (tag_directives_start != tag_directives_end) {
// yaml_tag_directive_t *tag_directive;
// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
// goto error;
// for (tag_directive = tag_directives_start;
// tag_directive != tag_directives_end; tag_directive ++) {
// assert(tag_directive.handle);
// assert(tag_directive.prefix);
// if (!yaml_check_utf8(tag_directive.handle,
// strlen((char *)tag_directive.handle)))
// goto error;
// if (!yaml_check_utf8(tag_directive.prefix,
// strlen((char *)tag_directive.prefix)))
// goto error;
// value.handle = yaml_strdup(tag_directive.handle);
// value.prefix = yaml_strdup(tag_directive.prefix);
// if (!value.handle || !value.prefix) goto error;
// if (!PUSH(&context, tag_directives_copy, value))
// goto error;
// value.handle = NULL;
// value.prefix = NULL;
// }
// }
//
// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
// tag_directives_copy.start, tag_directives_copy.top,
// start_implicit, end_implicit, mark, mark);
//
// return 1;
//
// error:
// STACK_DEL(&context, nodes);
// yaml_free(version_directive_copy);
// while (!STACK_EMPTY(&context, tag_directives_copy)) {
// yaml_tag_directive_t value = POP(&context, tag_directives_copy);
// yaml_free(value.handle);
// yaml_free(value.prefix);
// }
// STACK_DEL(&context, tag_directives_copy);
// yaml_free(value.handle);
// yaml_free(value.prefix);
//
// return 0;
// }
//
// /*
// * Destroy a document object.
// */
//
// yaml_DECLARE(void)
// yaml_document_delete(document *yaml_document_t)
// {
// struct {
// YAML_error_type_t error;
// } context;
// yaml_tag_directive_t *tag_directive;
//
// context.error = yaml_NO_ERROR; /* Eliminate a compiler warning. */
//
// assert(document); /* Non-NULL document object is expected. */
//
// while (!STACK_EMPTY(&context, document.nodes)) {
// yaml_node_t node = POP(&context, document.nodes);
// yaml_free(node.tag);
// switch (node.type) {
// case yaml_SCALAR_NODE:
// yaml_free(node.data.scalar.value);
// break;
// case yaml_SEQUENCE_NODE:
// STACK_DEL(&context, node.data.sequence.items);
// break;
// case yaml_MAPPING_NODE:
// STACK_DEL(&context, node.data.mapping.pairs);
// break;
// default:
// assert(0); /* Should not happen. */
// }
// }
// STACK_DEL(&context, document.nodes);
//
// yaml_free(document.version_directive);
// for (tag_directive = document.tag_directives.start;
// tag_directive != document.tag_directives.end;
// tag_directive++) {
// yaml_free(tag_directive.handle);
// yaml_free(tag_directive.prefix);
// }
// yaml_free(document.tag_directives.start);
//
// memset(document, 0, sizeof(yaml_document_t));
// }
//
// /**
// * Get a document node.
// */
//
// yaml_DECLARE(yaml_node_t *)
// yaml_document_get_node(document *yaml_document_t, int index)
// {
// assert(document); /* Non-NULL document object is expected. */
//
// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
// return document.nodes.start + index - 1;
// }
// return NULL;
// }
//
// /**
// * Get the root object.
// */
//
// yaml_DECLARE(yaml_node_t *)
// yaml_document_get_root_node(document *yaml_document_t)
// {
// assert(document); /* Non-NULL document object is expected. */
//
// if (document.nodes.top != document.nodes.start) {
// return document.nodes.start;
// }
// return NULL;
// }
//
// /*
// * Add a scalar node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_scalar(document *yaml_document_t,
// yaml_char_t *tag, yaml_char_t *value, int length,
// yaml_scalar_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// yaml_char_t *value_copy = NULL;
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
// assert(value); /* Non-NULL value is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_SCALAR_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (length < 0) {
// length = strlen((char *)value);
// }
//
// if (!yaml_check_utf8(value, length)) goto error;
// value_copy = yaml_malloc(length+1);
// if (!value_copy) goto error;
// memcpy(value_copy, value, length);
// value_copy[length] = '\0';
//
// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// yaml_free(tag_copy);
// yaml_free(value_copy);
//
// return 0;
// }
//
// /*
// * Add a sequence node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_sequence(document *yaml_document_t,
// yaml_char_t *tag, yaml_sequence_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// struct {
// yaml_node_item_t *start;
// yaml_node_item_t *end;
// yaml_node_item_t *top;
// } items = { NULL, NULL, NULL };
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_SEQUENCE_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error;
//
// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
// style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// STACK_DEL(&context, items);
// yaml_free(tag_copy);
//
// return 0;
// }
//
// /*
// * Add a mapping node to a document.
// */
//
// yaml_DECLARE(int)
// yaml_document_add_mapping(document *yaml_document_t,
// yaml_char_t *tag, yaml_mapping_style_t style)
// {
// struct {
// YAML_error_type_t error;
// } context;
// YAML_mark_t mark = { 0, 0, 0 };
// yaml_char_t *tag_copy = NULL;
// struct {
// yaml_node_pair_t *start;
// yaml_node_pair_t *end;
// yaml_node_pair_t *top;
// } pairs = { NULL, NULL, NULL };
// yaml_node_t node;
//
// assert(document); /* Non-NULL document object is expected. */
//
// if (!tag) {
// tag = (yaml_char_t *)yaml_DEFAULT_MAPPING_TAG;
// }
//
// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error;
// tag_copy = yaml_strdup(tag);
// if (!tag_copy) goto error;
//
// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error;
//
// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
// style, mark, mark);
// if (!PUSH(&context, document.nodes, node)) goto error;
//
// return document.nodes.top - document.nodes.start;
//
// error:
// STACK_DEL(&context, pairs);
// yaml_free(tag_copy);
//
// return 0;
// }
//
// /*
// * Append an item to a sequence node.
// */
//
// yaml_DECLARE(int)
// yaml_document_append_sequence_item(document *yaml_document_t,
// int sequence, int item)
// {
// struct {
// YAML_error_type_t error;
// } context;
//
// assert(document); /* Non-NULL document is required. */
// assert(sequence > 0
// && document.nodes.start + sequence <= document.nodes.top);
// /* Valid sequence id is required. */
// assert(document.nodes.start[sequence-1].type == yaml_SEQUENCE_NODE);
// /* A sequence node is required. */
// assert(item > 0 && document.nodes.start + item <= document.nodes.top);
// /* Valid item id is required. */
//
// if (!PUSH(&context,
// document.nodes.start[sequence-1].data.sequence.items, item))
// return 0;
//
// return 1;
// }
//
// /*
// * Append a pair of a key and a value to a mapping node.
// */
//
// yaml_DECLARE(int)
// yaml_document_append_mapping_pair(document *yaml_document_t,
// int mapping, int key, int value)
// {
// struct {
// YAML_error_type_t error;
// } context;
//
// yaml_node_pair_t pair;
//
// assert(document); /* Non-NULL document is required. */
// assert(mapping > 0
// && document.nodes.start + mapping <= document.nodes.top);
// /* Valid mapping id is required. */
// assert(document.nodes.start[mapping-1].type == yaml_MAPPING_NODE);
// /* A mapping node is required. */
// assert(key > 0 && document.nodes.start + key <= document.nodes.top);
// /* Valid key id is required. */
// assert(value > 0 && document.nodes.start + value <= document.nodes.top);
// /* Valid value id is required. */
//
// pair.key = key;
// pair.value = value;
//
// if (!PUSH(&context,
// document.nodes.start[mapping-1].data.mapping.pairs, pair))
// return 0;
//
// return 1;
// }
//


@@ -1,622 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"errors"
"fmt"
"io"
"reflect"
"runtime"
"strconv"
"strings"
)
type Unmarshaler interface {
UnmarshalYAML(tag string, value interface{}) error
}
// A Number represents a YAML number literal.
type Number string
// String returns the literal text of the number.
func (n Number) String() string { return string(n) }
// Float64 returns the number as a float64.
func (n Number) Float64() (float64, error) {
return strconv.ParseFloat(string(n), 64)
}
// Int64 returns the number as an int64.
func (n Number) Int64() (int64, error) {
return strconv.ParseInt(string(n), 10, 64)
}
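// Illustrative usage (sketch, assuming the decoder defined below): Number
// values are only produced when UseNumber is enabled, mirroring
// encoding/json:
//
//	d := NewDecoder(strings.NewReader("n: 42"))
//	d.UseNumber()
//	var out map[string]interface{}
//	_ = d.Decode(&out)                // out["n"] is Number("42")
//	i, _ := out["n"].(Number).Int64() // i == 42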
type Decoder struct {
parser yaml_parser_t
event yaml_event_t
replay_events []yaml_event_t
useNumber bool
anchors map[string][]yaml_event_t
tracking_anchors [][]yaml_event_t
}
type ParserError struct {
ErrorType YAML_error_type_t
Context string
ContextMark YAML_mark_t
Problem string
ProblemMark YAML_mark_t
}
func (e *ParserError) Error() string {
return fmt.Sprintf("yaml: [%s] %s at line %d, column %d", e.Context, e.Problem, e.ProblemMark.line+1, e.ProblemMark.column+1)
}
type UnexpectedEventError struct {
Value string
EventType yaml_event_type_t
At YAML_mark_t
}
func (e *UnexpectedEventError) Error() string {
return fmt.Sprintf("yaml: Unexpect event [%d]: '%s' at line %d, column %d", e.EventType, e.Value, e.At.line+1, e.At.column+1)
}
func recovery(err *error) {
if r := recover(); r != nil {
if _, ok := r.(runtime.Error); ok {
panic(r)
}
var tmpError error
switch r := r.(type) {
case error:
tmpError = r
case string:
tmpError = errors.New(r)
default:
tmpError = errors.New("Unknown panic: " + reflect.ValueOf(r).String())
}
*err = tmpError
}
}
func Unmarshal(data []byte, v interface{}) error {
d := NewDecoder(bytes.NewBuffer(data))
return d.Decode(v)
}
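// Illustrative usage (sketch, not from the original source):
//
//	type Config struct {
//		Name  string `yaml:"name"`
//		Ports []int  `yaml:"ports,flow"`
//	}
//	var c Config
//	err := Unmarshal([]byte("name: web\nports: [80, 443]"), &c)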
func NewDecoder(r io.Reader) *Decoder {
d := &Decoder{
anchors: make(map[string][]yaml_event_t),
tracking_anchors: make([][]yaml_event_t, 1),
}
yaml_parser_initialize(&d.parser)
yaml_parser_set_input_reader(&d.parser, r)
return d
}
func (d *Decoder) Decode(v interface{}) (err error) {
defer recovery(&err)
rv := reflect.ValueOf(v)
if rv.Kind() != reflect.Ptr || rv.IsNil() {
return fmt.Errorf("Expected a pointer or nil but was a %s at %s", rv.String(), d.event.start_mark)
}
if d.event.event_type == yaml_NO_EVENT {
d.nextEvent()
if d.event.event_type != yaml_STREAM_START_EVENT {
return errors.New("Invalid stream")
}
d.nextEvent()
}
d.document(rv)
return nil
}
func (d *Decoder) UseNumber() { d.useNumber = true }
func (d *Decoder) error(err error) {
panic(err)
}
func (d *Decoder) nextEvent() {
if d.event.event_type == yaml_STREAM_END_EVENT {
d.error(errors.New("The stream is closed"))
}
if d.replay_events != nil {
d.event = d.replay_events[0]
if len(d.replay_events) == 1 {
d.replay_events = nil
} else {
d.replay_events = d.replay_events[1:]
}
} else {
if !yaml_parser_parse(&d.parser, &d.event) {
yaml_event_delete(&d.event)
d.error(&ParserError{
ErrorType: d.parser.error,
Context: d.parser.context,
ContextMark: d.parser.context_mark,
Problem: d.parser.problem,
ProblemMark: d.parser.problem_mark,
})
}
}
last := len(d.tracking_anchors)
// skip aliases when tracking an anchor
if last > 0 && d.event.event_type != yaml_ALIAS_EVENT {
d.tracking_anchors[last-1] = append(d.tracking_anchors[last-1], d.event)
}
}
func (d *Decoder) document(rv reflect.Value) {
if d.event.event_type != yaml_DOCUMENT_START_EVENT {
d.error(fmt.Errorf("Expected document start at %s", d.event.start_mark))
}
d.nextEvent()
d.parse(rv)
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.error(fmt.Errorf("Expected document end at %s", d.event.start_mark))
}
d.nextEvent()
}
func (d *Decoder) parse(rv reflect.Value) {
if !rv.IsValid() {
// skip ahead since we cannot store
d.valueInterface()
return
}
anchor := string(d.event.anchor)
switch d.event.event_type {
case yaml_SEQUENCE_START_EVENT:
d.begin_anchor(anchor)
d.sequence(rv)
d.end_anchor(anchor)
case yaml_MAPPING_START_EVENT:
d.begin_anchor(anchor)
d.mapping(rv)
d.end_anchor(anchor)
case yaml_SCALAR_EVENT:
d.begin_anchor(anchor)
d.scalar(rv)
d.end_anchor(anchor)
case yaml_ALIAS_EVENT:
d.alias(rv)
case yaml_DOCUMENT_END_EVENT:
default:
d.error(&UnexpectedEventError{
Value: string(d.event.value),
EventType: d.event.event_type,
At: d.event.start_mark,
})
}
}
func (d *Decoder) begin_anchor(anchor string) {
if anchor != "" {
events := []yaml_event_t{d.event}
d.tracking_anchors = append(d.tracking_anchors, events)
}
}
func (d *Decoder) end_anchor(anchor string) {
if anchor != "" {
events := d.tracking_anchors[len(d.tracking_anchors)-1]
d.tracking_anchors = d.tracking_anchors[0 : len(d.tracking_anchors)-1]
// remove the anchor, replaying events shouldn't have anchors
events[0].anchor = nil
// we went one too many, remove the extra event
events = events[:len(events)-1]
// if nested, append to all the other anchors
for i, e := range d.tracking_anchors {
d.tracking_anchors[i] = append(e, events...)
}
d.anchors[anchor] = events
}
}
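// Illustrative note (assumption): begin_anchor/end_anchor implement YAML
// anchors by recording events for replay. Given
//
//	base: &b {a: 1}
//	copy: *b
//
// the events decoded under &b end up in d.anchors["b"], and the later *b
// alias replays them through d.replay_events (see alias below).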
func (d *Decoder) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, reflect.Value) {
// If v is a named type and is addressable,
// start with its address, so that if the type has pointer methods,
// we find them.
if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() {
v = v.Addr()
}
for {
// Load value from interface, but only if the result will be
// usefully addressable.
if v.Kind() == reflect.Interface && !v.IsNil() {
e := v.Elem()
if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) {
v = e
continue
}
}
if v.Kind() != reflect.Ptr {
break
}
if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() {
break
}
if v.IsNil() {
v.Set(reflect.New(v.Type().Elem()))
}
if v.Type().NumMethod() > 0 {
if u, ok := v.Interface().(Unmarshaler); ok {
var temp interface{}
return u, reflect.ValueOf(&temp)
}
}
v = v.Elem()
}
return nil, v
}
func (d *Decoder) sequence(v reflect.Value) {
if d.event.event_type != yaml_SEQUENCE_START_EVENT {
d.error(fmt.Errorf("Expected sequence start at %s", d.event.start_mark))
}
u, pv := d.indirect(v, false)
if u != nil {
defer func() {
if err := u.UnmarshalYAML(yaml_SEQ_TAG, pv.Interface()); err != nil {
d.error(err)
}
}()
_, pv = d.indirect(pv, false)
}
v = pv
// Check type of target.
switch v.Kind() {
case reflect.Interface:
if v.NumMethod() == 0 {
// Decoding into nil interface? Switch to non-reflect code.
v.Set(reflect.ValueOf(d.sequenceInterface()))
return
}
// Otherwise it's invalid.
fallthrough
default:
d.error(fmt.Errorf("Expected an array, slice or interface{} but was a %s at %s", v, d.event.start_mark))
case reflect.Array:
case reflect.Slice:
break
}
d.nextEvent()
i := 0
done:
for {
switch d.event.event_type {
case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
break done
}
// Get element of array, growing if necessary.
if v.Kind() == reflect.Slice {
// Grow slice if necessary
if i >= v.Cap() {
newcap := v.Cap() + v.Cap()/2
if newcap < 4 {
newcap = 4
}
newv := reflect.MakeSlice(v.Type(), v.Len(), newcap)
reflect.Copy(newv, v)
v.Set(newv)
}
if i >= v.Len() {
v.SetLen(i + 1)
}
}
if i < v.Len() {
// Decode into element.
d.parse(v.Index(i))
} else {
// Ran out of fixed array: skip.
d.parse(reflect.Value{})
}
i++
}
if i < v.Len() {
if v.Kind() == reflect.Array {
// Array. Zero the rest.
z := reflect.Zero(v.Type().Elem())
for ; i < v.Len(); i++ {
v.Index(i).Set(z)
}
} else {
v.SetLen(i)
}
}
if i == 0 && v.Kind() == reflect.Slice {
v.Set(reflect.MakeSlice(v.Type(), 0, 0))
}
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.nextEvent()
}
}
func (d *Decoder) mapping(v reflect.Value) {
u, pv := d.indirect(v, false)
if u != nil {
defer func() {
if err := u.UnmarshalYAML(yaml_MAP_TAG, pv.Interface()); err != nil {
d.error(err)
}
}()
_, pv = d.indirect(pv, false)
}
v = pv
// Decoding into nil interface? Switch to non-reflect code.
if v.Kind() == reflect.Interface && v.NumMethod() == 0 {
v.Set(reflect.ValueOf(d.mappingInterface()))
return
}
// Check type of target: struct or map[X]Y
switch v.Kind() {
case reflect.Struct:
d.mappingStruct(v)
return
case reflect.Map:
default:
d.error(fmt.Errorf("Expected a struct or map but was a %s at %s ", v, d.event.start_mark))
}
mapt := v.Type()
if v.IsNil() {
v.Set(reflect.MakeMap(mapt))
}
d.nextEvent()
keyt := mapt.Key()
mapElemt := mapt.Elem()
var mapElem reflect.Value
done:
for {
switch d.event.event_type {
case yaml_MAPPING_END_EVENT:
break done
case yaml_DOCUMENT_END_EVENT:
return
}
key := reflect.New(keyt)
d.parse(key.Elem())
if !mapElem.IsValid() {
mapElem = reflect.New(mapElemt).Elem()
} else {
mapElem.Set(reflect.Zero(mapElemt))
}
d.parse(mapElem)
v.SetMapIndex(key.Elem(), mapElem)
}
d.nextEvent()
}
func (d *Decoder) mappingStruct(v reflect.Value) {
structt := v.Type()
fields := cachedTypeFields(structt)
d.nextEvent()
done:
for {
switch d.event.event_type {
case yaml_MAPPING_END_EVENT:
break done
case yaml_DOCUMENT_END_EVENT:
return
}
key := ""
d.parse(reflect.ValueOf(&key))
// Figure out field corresponding to key.
var subv reflect.Value
var f *field
for i := range fields {
ff := &fields[i]
if ff.name == key {
f = ff
break
}
if f == nil && strings.EqualFold(ff.name, key) {
f = ff
}
}
if f != nil {
subv = v
for _, i := range f.index {
if subv.Kind() == reflect.Ptr {
if subv.IsNil() {
subv.Set(reflect.New(subv.Type().Elem()))
}
subv = subv.Elem()
}
subv = subv.Field(i)
}
}
d.parse(subv)
}
d.nextEvent()
}
func (d *Decoder) scalar(v reflect.Value) {
val := string(d.event.value)
wantptr := null_values[val]
u, pv := d.indirect(v, wantptr)
var tag string
if u != nil {
defer func() {
if err := u.UnmarshalYAML(tag, pv.Interface()); err != nil {
d.error(err)
}
}()
_, pv = d.indirect(pv, wantptr)
}
v = pv
var err error
tag, err = resolve(d.event, v, d.useNumber)
if err != nil {
d.error(err)
}
d.nextEvent()
}
func (d *Decoder) alias(rv reflect.Value) {
val, ok := d.anchors[string(d.event.anchor)]
if !ok {
d.error(fmt.Errorf("missing anchor: '%s' at %s", d.event.anchor, d.event.start_mark))
}
d.replay_events = val
d.nextEvent()
d.parse(rv)
}
func (d *Decoder) valueInterface() interface{} {
var v interface{}
anchor := string(d.event.anchor)
switch d.event.event_type {
case yaml_SEQUENCE_START_EVENT:
d.begin_anchor(anchor)
v = d.sequenceInterface()
case yaml_MAPPING_START_EVENT:
d.begin_anchor(anchor)
v = d.mappingInterface()
case yaml_SCALAR_EVENT:
d.begin_anchor(anchor)
v = d.scalarInterface()
case yaml_ALIAS_EVENT:
rv := reflect.ValueOf(&v)
d.alias(rv)
return v
case yaml_DOCUMENT_END_EVENT:
d.error(&UnexpectedEventError{
Value: string(d.event.value),
EventType: d.event.event_type,
At: d.event.start_mark,
})
}
d.end_anchor(anchor)
return v
}
func (d *Decoder) scalarInterface() interface{} {
_, v := resolveInterface(d.event, d.useNumber)
d.nextEvent()
return v
}
// sequenceInterface is like sequence but returns []interface{}.
func (d *Decoder) sequenceInterface() []interface{} {
var v = make([]interface{}, 0)
d.nextEvent()
done:
for {
switch d.event.event_type {
case yaml_SEQUENCE_END_EVENT, yaml_DOCUMENT_END_EVENT:
break done
}
v = append(v, d.valueInterface())
}
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.nextEvent()
}
return v
}
// mappingInterface is like mapping but returns map[interface{}]interface{}.
func (d *Decoder) mappingInterface() map[interface{}]interface{} {
m := make(map[interface{}]interface{})
d.nextEvent()
done:
for {
switch d.event.event_type {
case yaml_MAPPING_END_EVENT, yaml_DOCUMENT_END_EVENT:
break done
}
key := d.valueInterface()
// Read value.
m[key] = d.valueInterface()
}
if d.event.event_type != yaml_DOCUMENT_END_EVENT {
d.nextEvent()
}
return m
}


@@ -1,395 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"encoding/base64"
"io"
"math"
"reflect"
"regexp"
"sort"
"strconv"
"time"
)
var (
timeTimeType = reflect.TypeOf(time.Time{})
marshalerType = reflect.TypeOf(new(Marshaler)).Elem()
numberType = reflect.TypeOf(Number(""))
nonPrintable = regexp.MustCompile("[^\t\n\r\u0020-\u007E\u0085\u00A0-\uD7FF\uE000-\uFFFD]")
multiline = regexp.MustCompile("\n|\u0085|\u2028|\u2029")
shortTags = map[string]string{
yaml_NULL_TAG: "!!null",
yaml_BOOL_TAG: "!!bool",
yaml_STR_TAG: "!!str",
yaml_INT_TAG: "!!int",
yaml_FLOAT_TAG: "!!float",
yaml_TIMESTAMP_TAG: "!!timestamp",
yaml_SEQ_TAG: "!!seq",
yaml_MAP_TAG: "!!map",
yaml_BINARY_TAG: "!!binary",
}
)
type Marshaler interface {
MarshalYAML() (tag string, value interface{}, err error)
}
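// Illustrative sketch (assumption): a type controls its own YAML
// representation by implementing Marshaler, e.g.:
//
//	type Celsius float64
//
//	func (c Celsius) MarshalYAML() (string, interface{}, error) {
//		return yaml_FLOAT_TAG, float64(c), nil
//	}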
// An Encoder writes YAML documents to an output stream.
type Encoder struct {
w io.Writer
emitter yaml_emitter_t
event yaml_event_t
flow bool
err error
}
func Marshal(v interface{}) ([]byte, error) {
b := bytes.Buffer{}
e := NewEncoder(&b)
err := e.Encode(v)
return b.Bytes(), err
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
e := &Encoder{w: w}
yaml_emitter_initialize(&e.emitter)
yaml_emitter_set_output_writer(&e.emitter, e.w)
yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING)
e.emit()
yaml_document_start_event_initialize(&e.event, nil, nil, true)
e.emit()
return e
}
func (e *Encoder) Encode(v interface{}) (err error) {
defer recovery(&err)
if e.err != nil {
return e.err
}
e.marshal("", reflect.ValueOf(v), true)
yaml_document_end_event_initialize(&e.event, true)
e.emit()
e.emitter.open_ended = false
yaml_stream_end_event_initialize(&e.event)
e.emit()
return nil
}
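// Illustrative usage (sketch, not from the original source):
//
//	out, err := Marshal(map[string]string{"name": "web"})
//	// out is "name: web\n" (exact layout depends on emitter defaults)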
func (e *Encoder) emit() {
if !yaml_emitter_emit(&e.emitter, &e.event) {
panic("bad emit")
}
}
func (e *Encoder) marshal(tag string, v reflect.Value, allowAddr bool) {
vt := v.Type()
if vt.Implements(marshalerType) {
e.emitMarshaler(tag, v)
return
}
if vt.Kind() != reflect.Ptr && allowAddr {
if reflect.PtrTo(vt).Implements(marshalerType) {
e.emitAddrMarshaler(tag, v)
return
}
}
switch v.Kind() {
case reflect.Interface:
if v.IsNil() {
e.emitNil()
} else {
e.marshal(tag, v.Elem(), allowAddr)
}
case reflect.Map:
e.emitMap(tag, v)
case reflect.Ptr:
if v.IsNil() {
e.emitNil()
} else {
e.marshal(tag, v.Elem(), true)
}
case reflect.Struct:
e.emitStruct(tag, v)
case reflect.Slice:
e.emitSlice(tag, v)
case reflect.String:
e.emitString(tag, v)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
e.emitInt(tag, v)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
e.emitUint(tag, v)
case reflect.Float32, reflect.Float64:
e.emitFloat(tag, v)
case reflect.Bool:
e.emitBool(tag, v)
default:
panic("Can't marshal type yet: " + v.Type().String())
}
}
func (e *Encoder) emitMap(tag string, v reflect.Value) {
e.mapping(tag, func() {
var keys stringValues = v.MapKeys()
sort.Sort(keys)
for _, k := range keys {
e.marshal("", k, true)
e.marshal("", v.MapIndex(k), true)
}
})
}
func (e *Encoder) emitStruct(tag string, v reflect.Value) {
if v.Type() == timeTimeType {
e.emitTime(tag, v)
return
}
fields := cachedTypeFields(v.Type())
e.mapping(tag, func() {
for _, f := range fields {
fv := fieldByIndex(v, f.index)
if !fv.IsValid() || f.omitEmpty && isEmptyValue(fv) {
continue
}
e.marshal("", reflect.ValueOf(f.name), true)
e.flow = f.flow
e.marshal("", fv, true)
}
})
}
func (e *Encoder) emitTime(tag string, v reflect.Value) {
t := v.Interface().(time.Time)
bytes, _ := t.MarshalText()
e.emitScalar(string(bytes), "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func isEmptyValue(v reflect.Value) bool {
switch v.Kind() {
case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
return v.Len() == 0
case reflect.Bool:
return !v.Bool()
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return v.Int() == 0
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
return v.Uint() == 0
case reflect.Float32, reflect.Float64:
return v.Float() == 0
case reflect.Interface, reflect.Ptr:
return v.IsNil()
}
return false
}
func (e *Encoder) mapping(tag string, f func()) {
implicit := tag == ""
style := yaml_BLOCK_MAPPING_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_MAPPING_STYLE
}
yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
f()
yaml_mapping_end_event_initialize(&e.event)
e.emit()
}
func (e *Encoder) emitSlice(tag string, v reflect.Value) {
if v.Type() == byteSliceType {
e.emitBase64(tag, v)
return
}
implicit := tag == ""
style := yaml_BLOCK_SEQUENCE_STYLE
if e.flow {
e.flow = false
style = yaml_FLOW_SEQUENCE_STYLE
}
yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style)
e.emit()
n := v.Len()
for i := 0; i < n; i++ {
e.marshal("", v.Index(i), true)
}
yaml_sequence_end_event_initialize(&e.event)
e.emit()
}
func (e *Encoder) emitBase64(tag string, v reflect.Value) {
if v.IsNil() {
e.emitNil()
return
}
s := v.Bytes()
dst := make([]byte, base64.StdEncoding.EncodedLen(len(s)))
base64.StdEncoding.Encode(dst, s)
e.emitScalar(string(dst), "", yaml_BINARY_TAG, yaml_DOUBLE_QUOTED_SCALAR_STYLE)
}
func (e *Encoder) emitString(tag string, v reflect.Value) {
var style yaml_scalar_style_t
s := v.String()
if nonPrintable.MatchString(s) {
e.emitBase64(tag, v)
return
}
if v.Type() == numberType {
style = yaml_PLAIN_SCALAR_STYLE
} else {
event := yaml_event_t{
implicit: true,
value: []byte(s),
}
rtag, _ := resolveInterface(event, false)
if tag == "" && rtag != yaml_STR_TAG {
style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
} else if multiline.MatchString(s) {
style = yaml_LITERAL_SCALAR_STYLE
} else {
style = yaml_PLAIN_SCALAR_STYLE
}
}
e.emitScalar(s, "", tag, style)
}
func (e *Encoder) emitBool(tag string, v reflect.Value) {
s := strconv.FormatBool(v.Bool())
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitInt(tag string, v reflect.Value) {
s := strconv.FormatInt(v.Int(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitUint(tag string, v reflect.Value) {
s := strconv.FormatUint(v.Uint(), 10)
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitFloat(tag string, v reflect.Value) {
f := v.Float()
var s string
switch {
case math.IsNaN(f):
s = ".nan"
case math.IsInf(f, 1):
s = "+.inf"
case math.IsInf(f, -1):
s = "-.inf"
default:
s = strconv.FormatFloat(f, 'g', -1, v.Type().Bits())
}
e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitNil() {
e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
}
func (e *Encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
implicit := tag == ""
if !implicit {
style = yaml_PLAIN_SCALAR_STYLE
}
stag := shortTags[tag]
if stag == "" {
stag = tag
}
yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(stag), []byte(value), implicit, implicit, style)
e.emit()
}
func (e *Encoder) emitMarshaler(tag string, v reflect.Value) {
if v.Kind() == reflect.Ptr && v.IsNil() {
e.emitNil()
return
}
m := v.Interface().(Marshaler)
if m == nil {
e.emitNil()
return
}
t, val, err := m.MarshalYAML()
if err != nil {
panic(err)
}
if val == nil {
e.emitNil()
return
}
e.marshal(t, reflect.ValueOf(val), false)
}
func (e *Encoder) emitAddrMarshaler(tag string, v reflect.Value) {
if !v.CanAddr() {
e.marshal(tag, v, false)
return
}
va := v.Addr()
if va.IsNil() {
e.emitNil()
return
}
m := v.Interface().(Marshaler)
t, val, err := m.MarshalYAML()
if err != nil {
panic(err)
}
if val == nil {
e.emitNil()
return
}
e.marshal(t, reflect.ValueOf(val), false)
}

File diff suppressed because it is too large


@@ -1,465 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"io"
)
/*
* Set the reader error and return false.
*/
func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string,
offset int, value int) bool {
parser.error = yaml_READER_ERROR
parser.problem = problem
parser.problem_offset = offset
parser.problem_value = value
return false
}
/*
* Byte order marks.
*/
const (
BOM_UTF8 = "\xef\xbb\xbf"
BOM_UTF16LE = "\xff\xfe"
BOM_UTF16BE = "\xfe\xff"
)
/*
* Determine the input stream encoding by checking the BOM symbol. If no BOM is
* found, the UTF-8 encoding is assumed. Return true on success, false on failure.
*/
func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
/* Ensure that we had enough bytes in the raw buffer. */
for !parser.eof &&
len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
if !yaml_parser_update_raw_buffer(parser) {
return false
}
}
/* Determine the encoding. */
raw := parser.raw_buffer
pos := parser.raw_buffer_pos
remaining := len(raw) - pos
if remaining >= 2 &&
raw[pos] == BOM_UTF16LE[0] && raw[pos+1] == BOM_UTF16LE[1] {
parser.encoding = yaml_UTF16LE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if remaining >= 2 &&
raw[pos] == BOM_UTF16BE[0] && raw[pos+1] == BOM_UTF16BE[1] {
parser.encoding = yaml_UTF16BE_ENCODING
parser.raw_buffer_pos += 2
parser.offset += 2
} else if remaining >= 3 &&
raw[pos] == BOM_UTF8[0] && raw[pos+1] == BOM_UTF8[1] && raw[pos+2] == BOM_UTF8[2] {
parser.encoding = yaml_UTF8_ENCODING
parser.raw_buffer_pos += 3
parser.offset += 3
} else {
parser.encoding = yaml_UTF8_ENCODING
}
return true
}
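// Illustrative note: a leading 0xFF 0xFE selects UTF-16LE and 0xFE 0xFF
// selects UTF-16BE (the two BOM bytes are skipped); a UTF-8 BOM is likewise
// skipped, and input with no BOM defaults to UTF-8.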
/*
* Update the raw buffer.
*/
func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
size_read := 0
/* Return if the raw buffer is full. */
if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
return true
}
/* Return on EOF. */
if parser.eof {
return true
}
/* Move the remaining bytes in the raw buffer to the beginning. */
if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
}
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
parser.raw_buffer_pos = 0
/* Call the read handler to fill the buffer. */
size_read, err := parser.read_handler(parser,
parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
if err == io.EOF {
parser.eof = true
} else if err != nil {
return yaml_parser_set_reader_error(parser, "input error: "+err.Error(),
parser.offset, -1)
}
return true
}
/*
* Ensure that the buffer contains at least `length` characters.
* Return true on success, false on failure.
*
* The length is supposed to be significantly less than the buffer size.
*/
func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
/* Read handler must be set. */
if parser.read_handler == nil {
panic("read handler must be set")
}
/* If the EOF flag is set and the raw buffer is empty, do nothing. */
if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
return true
}
/* Return if the buffer contains enough characters. */
if parser.unread >= length {
return true
}
/* Determine the input encoding if it is not known yet. */
if parser.encoding == yaml_ANY_ENCODING {
if !yaml_parser_determine_encoding(parser) {
return false
}
}
/* Move the unread characters to the beginning of the buffer. */
buffer_end := len(parser.buffer)
if 0 < parser.buffer_pos &&
parser.buffer_pos < buffer_end {
copy(parser.buffer, parser.buffer[parser.buffer_pos:])
buffer_end -= parser.buffer_pos
parser.buffer_pos = 0
} else if parser.buffer_pos == buffer_end {
buffer_end = 0
parser.buffer_pos = 0
}
parser.buffer = parser.buffer[:cap(parser.buffer)]
/* Fill the buffer until it has enough characters. */
first := true
for parser.unread < length {
/* Fill the raw buffer if necessary. */
if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
if !yaml_parser_update_raw_buffer(parser) {
parser.buffer = parser.buffer[:buffer_end]
return false
}
}
first = false
/* Decode the raw buffer. */
for parser.raw_buffer_pos != len(parser.raw_buffer) {
var value rune
var w int
raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
incomplete := false
/* Decode the next character. */
switch parser.encoding {
case yaml_UTF8_ENCODING:
/*
* Decode a UTF-8 character. Check RFC 3629
* (http://www.ietf.org/rfc/rfc3629.txt) for more details.
*
* The following table (taken from the RFC) is used for
* decoding.
*
* Char. number range | UTF-8 octet sequence
* (hexadecimal) | (binary)
* --------------------+------------------------------------
* 0000 0000-0000 007F | 0xxxxxxx
* 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
* 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
* 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
*
* Additionally, the characters in the range 0xD800-0xDFFF
* are prohibited as they are reserved for use with UTF-16
* surrogate pairs.
*/
/* Determine the length of the UTF-8 sequence. */
octet := parser.raw_buffer[parser.raw_buffer_pos]
w = width(octet)
/* Check if the leading octet is valid. */
if w == 0 {
return yaml_parser_set_reader_error(parser,
"invalid leading UTF-8 octet",
parser.offset, int(octet))
}
/* Check if the raw buffer contains an incomplete character. */
if w > raw_unread {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-8 octet sequence",
parser.offset, -1)
}
incomplete = true
break
}
/* Decode the leading octet. */
switch {
case octet&0x80 == 0x00:
value = rune(octet & 0x7F)
case octet&0xE0 == 0xC0:
value = rune(octet & 0x1F)
case octet&0xF0 == 0xE0:
value = rune(octet & 0x0F)
case octet&0xF8 == 0xF0:
value = rune(octet & 0x07)
default:
value = 0
}
/* Check and decode the trailing octets. */
for k := 1; k < w; k++ {
octet = parser.raw_buffer[parser.raw_buffer_pos+k]
/* Check if the octet is valid. */
if (octet & 0xC0) != 0x80 {
return yaml_parser_set_reader_error(parser,
"invalid trailing UTF-8 octet",
parser.offset+k, int(octet))
}
/* Decode the octet. */
value = (value << 6) + rune(octet&0x3F)
}
/* Check the length of the sequence against the value. */
switch {
case w == 1:
case w == 2 && value >= 0x80:
case w == 3 && value >= 0x800:
case w == 4 && value >= 0x10000:
default:
return yaml_parser_set_reader_error(parser,
"invalid length of a UTF-8 sequence",
parser.offset, -1)
}
/* Check the range of the value. */
if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
return yaml_parser_set_reader_error(parser,
"invalid Unicode character",
parser.offset, int(value))
}
case yaml_UTF16LE_ENCODING,
yaml_UTF16BE_ENCODING:
var low, high int
if parser.encoding == yaml_UTF16LE_ENCODING {
low, high = 0, 1
} else {
high, low = 1, 0
}
/*
* The UTF-16 encoding is not as simple as one might
* naively think. Check RFC 2781
* (http://www.ietf.org/rfc/rfc2781.txt).
*
* Normally, two subsequent bytes describe a Unicode
* character. However a special technique (called a
* surrogate pair) is used for specifying character
* values larger than 0xFFFF.
*
* A surrogate pair consists of two pseudo-characters:
* high surrogate area (0xD800-0xDBFF)
* low surrogate area (0xDC00-0xDFFF)
*
* The following formulas are used for decoding
* and encoding characters using surrogate pairs:
*
* U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
* U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
* W1 = 110110yyyyyyyyyy
* W2 = 110111xxxxxxxxxx
*
* where U is the character value, W1 is the high surrogate
* area, W2 is the low surrogate area.
*/
/* Check for incomplete UTF-16 character. */
if raw_unread < 2 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 character",
parser.offset, -1)
}
incomplete = true
break
}
/* Get the character. */
value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
/* Check for unexpected low surrogate area. */
if (value & 0xFC00) == 0xDC00 {
return yaml_parser_set_reader_error(parser,
"unexpected low surrogate area",
parser.offset, int(value))
}
/* Check for a high surrogate area. */
if (value & 0xFC00) == 0xD800 {
w = 4
/* Check for incomplete surrogate pair. */
if raw_unread < 4 {
if parser.eof {
return yaml_parser_set_reader_error(parser,
"incomplete UTF-16 surrogate pair",
parser.offset, -1)
}
incomplete = true
break
}
/* Get the next character. */
value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
(rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
/* Check for a low surrogate area. */
if (value2 & 0xFC00) != 0xDC00 {
return yaml_parser_set_reader_error(parser,
"expected low surrogate area",
parser.offset+2, int(value2))
}
/* Generate the value of the surrogate pair. */
value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
} else {
w = 2
}
break
default:
panic("Impossible") /* Impossible. */
}
/* Check if the raw buffer contains enough bytes to form a character. */
if incomplete {
break
}
/*
* Check if the character is in the allowed range:
* #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
* | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
* | [#x10000-#x10FFFF] (32 bit)
*/
if !(value == 0x09 || value == 0x0A || value == 0x0D ||
(value >= 0x20 && value <= 0x7E) ||
(value == 0x85) || (value >= 0xA0 && value <= 0xD7FF) ||
(value >= 0xE000 && value <= 0xFFFD) ||
(value >= 0x10000 && value <= 0x10FFFF)) {
return yaml_parser_set_reader_error(parser,
"control characters are not allowed",
parser.offset, int(value))
}
/* Move the raw pointers. */
parser.raw_buffer_pos += w
parser.offset += w
/* Finally put the character into the buffer. */
/* 0000 0000-0000 007F . 0xxxxxxx */
if value <= 0x7F {
parser.buffer[buffer_end] = byte(value)
} else if value <= 0x7FF {
/* 0000 0080-0000 07FF . 110xxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xC0 + (value >> 6))
parser.buffer[buffer_end+1] = byte(0x80 + (value & 0x3F))
} else if value <= 0xFFFF {
/* 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xE0 + (value >> 12))
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_end+2] = byte(0x80 + (value & 0x3F))
} else {
/* 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
parser.buffer[buffer_end] = byte(0xF0 + (value >> 18))
parser.buffer[buffer_end+1] = byte(0x80 + ((value >> 12) & 0x3F))
parser.buffer[buffer_end+2] = byte(0x80 + ((value >> 6) & 0x3F))
parser.buffer[buffer_end+3] = byte(0x80 + (value & 0x3F))
}
buffer_end += w
parser.unread++
}
/* On EOF, put NUL into the buffer and return. */
if parser.eof {
parser.buffer[buffer_end] = 0
buffer_end++
parser.buffer = parser.buffer[:buffer_end]
parser.unread++
return true
}
}
parser.buffer = parser.buffer[:buffer_end]
return true
}


@@ -1,449 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"bytes"
"encoding/base64"
"fmt"
"math"
"reflect"
"regexp"
"strconv"
"strings"
"time"
)
var byteSliceType = reflect.TypeOf([]byte(nil))
var binary_tags = [][]byte{[]byte("!binary"), []byte(yaml_BINARY_TAG)}
var bool_values map[string]bool
var null_values map[string]bool
var signs = []byte{'-', '+'}
var nulls = []byte{'~', 'n', 'N'}
var bools = []byte{'t', 'T', 'f', 'F', 'y', 'Y', 'n', 'N', 'o', 'O'}
var timestamp_regexp *regexp.Regexp
var ymd_regexp *regexp.Regexp
func init() {
bool_values = make(map[string]bool)
bool_values["y"] = true
bool_values["yes"] = true
bool_values["n"] = false
bool_values["no"] = false
bool_values["true"] = true
bool_values["false"] = false
bool_values["on"] = true
bool_values["off"] = false
null_values = make(map[string]bool)
null_values["~"] = true
null_values["null"] = true
null_values["Null"] = true
null_values["NULL"] = true
timestamp_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)(?:(?:[Tt]|[ \t]+)([0-9][0-9]?):([0-9][0-9]):([0-9][0-9])(?:\\.([0-9]*))?(?:[ \t]*(?:Z|([-+][0-9][0-9]?)(?::([0-9][0-9])?)?))?)?$")
ymd_regexp = regexp.MustCompile("^([0-9][0-9][0-9][0-9])-([0-9][0-9]?)-([0-9][0-9]?)$")
}
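// Illustrative note (assumption): the two patterns follow the YAML 1.1
// timestamp spec, accepting e.g.
//
//	ymd_regexp:       2001-12-14
//	timestamp_regexp: 2001-12-14t21:59:43.10-05:00
//	                  2001-12-14 21:59:43.10 -5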
func resolve(event yaml_event_t, v reflect.Value, useNumber bool) (string, error) {
val := string(event.value)
if null_values[val] {
v.Set(reflect.Zero(v.Type()))
return yaml_NULL_TAG, nil
}
switch v.Kind() {
case reflect.String:
if useNumber && v.Type() == numberType {
tag, i := resolveInterface(event, useNumber)
if n, ok := i.(Number); ok {
v.Set(reflect.ValueOf(n))
return tag, nil
}
return "", fmt.Errorf("Not a number: '%s' at %s", event.value, event.start_mark)
}
return resolve_string(val, v, event)
case reflect.Bool:
return resolve_bool(val, v, event)
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
return resolve_int(val, v, useNumber, event)
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
return resolve_uint(val, v, useNumber, event)
case reflect.Float32, reflect.Float64:
return resolve_float(val, v, useNumber, event)
case reflect.Interface:
_, i := resolveInterface(event, useNumber)
if i != nil {
v.Set(reflect.ValueOf(i))
} else {
v.Set(reflect.Zero(v.Type()))
}
case reflect.Struct:
return resolve_time(val, v, event)
case reflect.Slice:
if v.Type() != byteSliceType {
return "", fmt.Errorf("Cannot resolve %s into %s at %s", val, v.String(), event.start_mark)
}
b, err := decode_binary(event.value, event)
if err != nil {
return "", err
}
v.Set(reflect.ValueOf(b))
default:
return "", fmt.Errorf("Unknown resolution for '%s' using %s at %s", val, v.String(), event.start_mark)
}
return yaml_STR_TAG, nil
}
func hasBinaryTag(event yaml_event_t) bool {
for _, tag := range binary_tags {
if bytes.Equal(event.tag, tag) {
return true
}
}
return false
}
func decode_binary(value []byte, event yaml_event_t) ([]byte, error) {
b := make([]byte, base64.StdEncoding.DecodedLen(len(value)))
n, err := base64.StdEncoding.Decode(b, value)
if err != nil {
return nil, fmt.Errorf("Invalid base64 text: '%s' at %s", string(b), event.start_mark)
}
return b[:n], nil
}
func resolve_string(val string, v reflect.Value, event yaml_event_t) (string, error) {
if len(event.tag) > 0 {
if hasBinaryTag(event) {
b, err := decode_binary(event.value, event)
if err != nil {
return "", err
}
val = string(b)
}
}
v.SetString(val)
return yaml_STR_TAG, nil
}
func resolve_bool(val string, v reflect.Value, event yaml_event_t) (string, error) {
b, found := bool_values[strings.ToLower(val)]
if !found {
return "", fmt.Errorf("Invalid boolean: '%s' at %s", val, event.start_mark)
}
v.SetBool(b)
return yaml_BOOL_TAG, nil
}
func resolve_int(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
original := val
val = strings.Replace(val, "_", "", -1)
var value uint64
isNumberValue := v.Type() == numberType
sign := int64(1)
if val[0] == '-' {
sign = -1
val = val[1:]
} else if val[0] == '+' {
val = val[1:]
}
base := 0
if val == "0" {
if isNumberValue {
v.SetString("0")
} else {
v.Set(reflect.Zero(v.Type()))
}
return yaml_INT_TAG, nil
}
if strings.HasPrefix(val, "0o") {
base = 8
val = val[2:]
}
value, err := strconv.ParseUint(val, base, 64)
if err != nil {
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
}
var val64 int64
if value <= math.MaxInt64 {
val64 = int64(value)
if sign == -1 {
val64 = -val64
}
} else if sign == -1 && value == uint64(math.MaxInt64)+1 {
val64 = math.MinInt64
} else {
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
}
if isNumberValue {
v.SetString(strconv.FormatInt(val64, 10))
} else {
if v.OverflowInt(val64) {
return "", fmt.Errorf("Invalid integer: '%s' at %s", original, event.start_mark)
}
v.SetInt(val64)
}
return yaml_INT_TAG, nil
}
func resolve_uint(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
original := val
val = strings.Replace(val, "_", "", -1)
var value uint64
isNumberValue := v.Type() == numberType
if val[0] == '-' {
return "", fmt.Errorf("Unsigned int with negative value: '%s' at %s", original, event.start_mark)
}
if val[0] == '+' {
val = val[1:]
}
base := 0
if val == "0" {
if isNumberValue {
v.SetString("0")
} else {
v.Set(reflect.Zero(v.Type()))
}
return yaml_INT_TAG, nil
}
if strings.HasPrefix(val, "0o") {
base = 8
val = val[2:]
}
value, err := strconv.ParseUint(val, base, 64)
if err != nil {
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
}
if isNumberValue {
v.SetString(strconv.FormatUint(value, 10))
} else {
if v.OverflowUint(value) {
return "", fmt.Errorf("Invalid unsigned integer: '%s' at %s", val, event.start_mark)
}
v.SetUint(value)
}
return yaml_INT_TAG, nil
}
func resolve_float(val string, v reflect.Value, useNumber bool, event yaml_event_t) (string, error) {
val = strings.Replace(val, "_", "", -1)
var value float64
isNumberValue := v.Type() == numberType
typeBits := 64
if !isNumberValue {
typeBits = v.Type().Bits()
}
sign := 1
if val[0] == '-' {
sign = -1
val = val[1:]
} else if val[0] == '+' {
val = val[1:]
}
valLower := strings.ToLower(val)
if valLower == ".inf" {
value = math.Inf(sign)
} else if valLower == ".nan" {
value = math.NaN()
} else {
var err error
value, err = strconv.ParseFloat(val, typeBits)
value *= float64(sign)
if err != nil {
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
}
}
if isNumberValue {
v.SetString(strconv.FormatFloat(value, 'g', -1, typeBits))
} else {
if v.OverflowFloat(value) {
return "", fmt.Errorf("Invalid float: '%s' at %s", val, event.start_mark)
}
v.SetFloat(value)
}
return yaml_FLOAT_TAG, nil
}
func resolve_time(val string, v reflect.Value, event yaml_event_t) (string, error) {
var parsedTime time.Time
matches := ymd_regexp.FindStringSubmatch(val)
if len(matches) > 0 {
year, _ := strconv.Atoi(matches[1])
month, _ := strconv.Atoi(matches[2])
day, _ := strconv.Atoi(matches[3])
parsedTime = time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC)
} else {
matches = timestamp_regexp.FindStringSubmatch(val)
if len(matches) == 0 {
return "", fmt.Errorf("Invalid timestamp: '%s' at %s", val, event.start_mark)
}
year, _ := strconv.Atoi(matches[1])
month, _ := strconv.Atoi(matches[2])
day, _ := strconv.Atoi(matches[3])
hour, _ := strconv.Atoi(matches[4])
min, _ := strconv.Atoi(matches[5])
sec, _ := strconv.Atoi(matches[6])
nsec := 0
if matches[7] != "" {
// matches[7] holds the fractional-second digits; pad or truncate to nine
// digits so any written precision scales correctly to nanoseconds.
frac := (matches[7] + "000000000")[:9]
nsec, _ = strconv.Atoi(frac)
}
loc := time.UTC
if matches[8] != "" {
sign := matches[8][0]
hr, _ := strconv.Atoi(matches[8][1:])
min := 0
if matches[9] != "" {
min, _ = strconv.Atoi(matches[9])
}
zoneOffset := (hr*60 + min) * 60
if sign == '-' {
zoneOffset = -zoneOffset
}
loc = time.FixedZone("", zoneOffset)
}
parsedTime = time.Date(year, time.Month(month), day, hour, min, sec, nsec, loc)
}
v.Set(reflect.ValueOf(parsedTime))
return "", nil
}
func resolveInterface(event yaml_event_t, useNumber bool) (string, interface{}) {
val := string(event.value)
if len(event.tag) == 0 && !event.implicit {
return "", val
}
if len(val) == 0 {
return yaml_NULL_TAG, nil
}
var result interface{}
sign := false
c := val[0]
switch {
case bytes.IndexByte(signs, c) != -1:
sign = true
fallthrough
case c >= '0' && c <= '9':
i := int64(0)
result = &i
if useNumber {
var n Number
result = &n
}
v := reflect.ValueOf(result).Elem()
if _, err := resolve_int(val, v, useNumber, event); err == nil {
return yaml_INT_TAG, v.Interface()
}
f := float64(0)
result = &f
if useNumber {
var n Number
result = &n
}
v = reflect.ValueOf(result).Elem()
if _, err := resolve_float(val, v, useNumber, event); err == nil {
return yaml_FLOAT_TAG, v.Interface()
}
if !sign {
t := time.Time{}
if _, err := resolve_time(val, reflect.ValueOf(&t).Elem(), event); err == nil {
return "", t
}
}
case bytes.IndexByte(nulls, c) != -1:
if null_values[val] {
return yaml_NULL_TAG, nil
}
b := false
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
return yaml_BOOL_TAG, b
}
case c == '.':
f := float64(0)
result = &f
if useNumber {
var n Number
result = &n
}
v := reflect.ValueOf(result).Elem()
if _, err := resolve_float(val, v, useNumber, event); err == nil {
return yaml_FLOAT_TAG, v.Interface()
}
case bytes.IndexByte(bools, c) != -1:
b := false
if _, err := resolve_bool(val, reflect.ValueOf(&b).Elem(), event); err == nil {
return yaml_BOOL_TAG, b
}
}
if hasBinaryTag(event) {
bytes, err := decode_binary(event.value, event)
if err == nil {
return yaml_BINARY_TAG, bytes
}
}
return yaml_STR_TAG, val
}


@@ -1,62 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"fmt"
"os"
)
func Run_parser(cmd string, args []string) {
for i := 0; i < len(args); i++ {
fmt.Printf("[%d] Scanning '%s'", i, args[i])
file, err := os.Open(args[i])
if err != nil {
panic(fmt.Sprintf("Invalid file '%s': %s", args[i], err.Error()))
}
parser := yaml_parser_t{}
yaml_parser_initialize(&parser)
yaml_parser_set_input_reader(&parser, file)
failed := false
token := yaml_token_t{}
count := 0
for {
if !yaml_parser_scan(&parser, &token) {
failed = true
break
}
if token.token_type == yaml_STREAM_END_TOKEN {
break
}
count++
}
file.Close()
msg := "SUCCESS"
if failed {
msg = "FAILED"
if parser.error != yaml_NO_ERROR {
m := parser.problem_mark
fmt.Printf("ERROR: (%s) %s @ line: %d col: %d\n",
parser.context, parser.problem, m.line, m.column)
}
}
fmt.Printf("%s (%d tokens)\n", msg, count)
}
}
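// Illustrative usage (sketch): Run_parser("scan", []string{"a.yml", "b.yml"})
// prints one "SUCCESS (n tokens)" or "FAILED" line per input file.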

File diff suppressed because it is too large


@@ -1,360 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"reflect"
"sort"
"strings"
"sync"
"unicode"
)
// A field represents a single field found in a struct.
type field struct {
name string
tag bool
index []int
typ reflect.Type
omitEmpty bool
flow bool
}
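// Illustrative sketch (assumption): these fields are populated from `yaml`
// struct tags, e.g.:
//
//	type T struct {
//		Name  string   `yaml:"name"`
//		Notes []string `yaml:"notes,omitempty,flow"`
//		Skip  string   `yaml:"-"` // always ignored
//	}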
// byName sorts fields by name, breaking ties with depth,
// then breaking ties with "name came from yaml tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that YAML should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
tag := sf.Tag.Get("yaml")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, field{name, tagged, index, ft,
opts.Contains("omitempty"), opts.Contains("flow")})
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, field{name: ft.Name(), index: index, typ: ft})
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with YAML tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
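// Illustrative sketch (not in the original source): with the hypothetical
// structs below, the shallower field shadows the deeper one, exactly as Go's
// embedding rules dictate; yaml tags only break ties between fields at the
// same depth.
//
//	type Inner struct {
//		Name string
//	}
//	type Outer struct {
//		Inner
//		Name string // depth 1 shadows Inner.Name at depth 2
//	}
//
// typeFields(reflect.TypeOf(Outer{})) reports only the top-level Name field.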
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
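// Illustrative note (not in the original source): concurrent first calls for
// the same type may each run typeFields, but the duplicated work is benign;
// the results are identical, so whichever map write lands last is still valid.
//
//	fields := cachedTypeFields(reflect.TypeOf(v)) // v is any hypothetical struct value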
// tagOptions is the string following a comma in a struct field's "yaml"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
func fieldByIndex(v reflect.Value, index []int) reflect.Value {
for _, i := range index {
if v.Kind() == reflect.Ptr {
if v.IsNil() {
return reflect.Value{}
}
v = v.Elem()
}
v = v.Field(i)
}
return v
}
func typeByIndex(t reflect.Type, index []int) reflect.Type {
for _, i := range index {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
t = t.Field(i).Type
}
return t
}
// stringValues is a slice of reflect.Value holding *reflect.StringValue.
// It implements the methods to sort by string.
type stringValues []reflect.Value
func (sv stringValues) Len() int { return len(sv) }
func (sv stringValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] }
func (sv stringValues) Less(i, j int) bool {
av, ak := getElem(sv[i])
bv, bk := getElem(sv[j])
if ak == reflect.String && bk == reflect.String {
return av.String() < bv.String()
}
return ak < bk
}
func getElem(v reflect.Value) (reflect.Value, reflect.Kind) {
k := v.Kind()
for k == reflect.Interface || k == reflect.Ptr && !v.IsNil() {
v = v.Elem()
k = v.Kind()
}
return v, k
}
// parseTag splits a struct field's yaml tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular option. The option must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
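// Illustrative example (not in the original source): a tag of the form
// "name,omitempty,flow" splits into a name and its options.
//
//	name, opts := parseTag("name,omitempty,flow")
//	// name == "name"
//	// opts.Contains("omitempty") == true
//	// opts.Contains("flow") == true
//	// opts.Contains("omit") == false (options match whole items only)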


@@ -1,891 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
const (
INPUT_RAW_BUFFER_SIZE = 1024
/*
* The size of the input buffer.
*
* It should be possible to decode the whole raw buffer.
*/
INPUT_BUFFER_SIZE = (INPUT_RAW_BUFFER_SIZE * 3)
/*
* The size of the output buffer.
*/
OUTPUT_BUFFER_SIZE = 512
/*
* The size of the output raw buffer.
*
* It should be possible to encode the whole output buffer.
*/
OUTPUT_RAW_BUFFER_SIZE = (OUTPUT_BUFFER_SIZE*2 + 2)
INITIAL_STACK_SIZE = 16
INITIAL_QUEUE_SIZE = 16
)
func width(b byte) int {
if b&0x80 == 0 {
return 1
}
if b&0xE0 == 0xC0 {
return 2
}
if b&0xF0 == 0xE0 {
return 3
}
if b&0xF8 == 0xF0 {
return 4
}
return 0
}
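// Illustrative examples (not in the original source): width reads the UTF-8
// sequence length from the leading byte.
//
//	width('a')  == 1 // 0xxxxxxx: ASCII
//	width(0xC3) == 2 // 110xxxxx: e.g. first byte of 'é'
//	width(0xE2) == 3 // 1110xxxx: e.g. first byte of '…'
//	width(0xF0) == 4 // 11110xxx: first byte of a four-byte code point
//	width(0x80) == 0 // 10xxxxxx: continuation byte, not a sequence start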
func copy_bytes(dest []byte, dest_pos *int, src []byte, src_pos *int) {
w := width(src[*src_pos])
switch w {
case 4:
dest[*dest_pos+3] = src[*src_pos+3]
fallthrough
case 3:
dest[*dest_pos+2] = src[*src_pos+2]
fallthrough
case 2:
dest[*dest_pos+1] = src[*src_pos+1]
fallthrough
case 1:
dest[*dest_pos] = src[*src_pos]
default:
panic("invalid width")
}
*dest_pos += w
*src_pos += w
}
// /*
// * Check if the character at the specified position is an alphabetical
// * character, a digit, '_', or '-'.
// */
func is_alpha(b byte) bool {
return (b >= '0' && b <= '9') ||
(b >= 'A' && b <= 'Z') ||
(b >= 'a' && b <= 'z') ||
b == '_' || b == '-'
}
// /*
// * Check if the character at the specified position is a digit.
// */
//
func is_digit(b byte) bool {
return b >= '0' && b <= '9'
}
// /*
// * Get the value of a digit.
// */
//
func as_digit(b byte) int {
return int(b) - '0'
}
// /*
// * Check if the character at the specified position is a hex-digit.
// */
//
func is_hex(b byte) bool {
return (b >= '0' && b <= '9') ||
(b >= 'A' && b <= 'F') ||
(b >= 'a' && b <= 'f')
}
//
// /*
// * Get the value of a hex-digit.
// */
//
func as_hex(b byte) int {
if b >= 'A' && b <= 'F' {
return int(b) - 'A' + 10
} else if b >= 'a' && b <= 'f' {
return int(b) - 'a' + 10
}
return int(b) - '0'
}
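// Illustrative examples (not in the original source):
//
//	as_hex('7') == 7
//	as_hex('a') == 10
//	as_hex('F') == 15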
// #define AS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
// ((string).pointer[offset] - (yaml_char_t) '0'))
// /*
// * Check if the character is a line break, space, tab, or NUL.
// */
func is_blankz_at(b []byte, i int) bool {
return is_blank(b[i]) || is_breakz_at(b, i)
}
// /*
// * Check if the character at the specified position is a line break.
// */
func is_break_at(b []byte, i int) bool {
return b[i] == '\r' || /* CR (#xD)*/
b[i] == '\n' || /* LF (#xA) */
(b[i] == 0xC2 && b[i+1] == 0x85) || /* NEL (#x85) */
(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8) || /* LS (#x2028) */
(b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) /* PS (#x2029) */
}
func is_breakz_at(b []byte, i int) bool {
return is_break_at(b, i) || is_z(b[i])
}
func is_crlf_at(b []byte, i int) bool {
return b[i] == '\r' && b[i+1] == '\n'
}
// /*
// * Check if the character at the specified position is NUL.
// */
func is_z(b byte) bool {
return b == 0x0
}
// /*
// * Check if the character at the specified position is space.
// */
func is_space(b byte) bool {
return b == ' '
}
//
// /*
// * Check if the character at the specified position is tab.
// */
func is_tab(b byte) bool {
return b == '\t'
}
// /*
// * Check if the character at the specified position is blank (space or tab).
// */
func is_blank(b byte) bool {
return is_space(b) || is_tab(b)
}
// /*
// * Check if the character is ASCII.
// */
func is_ascii(b byte) bool {
return b <= '\x7f'
}
// /*
// * Check if the character can be printed unescaped.
// */
func is_printable_at(b []byte, i int) bool {
return ((b[i] == 0x0A) || /* . == #x0A */
(b[i] >= 0x20 && b[i] <= 0x7E) || /* #x20 <= . <= #x7E */
(b[i] == 0xC2 && b[i+1] >= 0xA0) || /* #0xA0 <= . <= #xD7FF */
(b[i] > 0xC2 && b[i] < 0xED) ||
(b[i] == 0xED && b[i+1] < 0xA0) ||
(b[i] == 0xEE) ||
(b[i] == 0xEF && /* && . != #xFEFF */
!(b[i+1] == 0xBB && b[i+2] == 0xBF) &&
!(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
}
func insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
// collapse the slice
if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
if parser.tokens_head != len(parser.tokens) {
// move the tokens down
copy(parser.tokens, parser.tokens[parser.tokens_head:])
}
// readjust the length
parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
parser.tokens_head = 0
}
parser.tokens = append(parser.tokens, *token)
if pos < 0 {
return
}
copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
parser.tokens[parser.tokens_head+pos] = *token
}
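// Illustrative note (not in the original source): a negative pos simply
// enqueues at the tail, while pos >= 0 inserts relative to tokens_head.
//
//	insert_token(parser, -1, &tok) // append to the back of the queue
//	insert_token(parser, 0, &tok)  // insert at the front of the queue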
// /*
// * Check if the character at the specified position is BOM.
// */
//
func is_bom_at(b []byte, i int) bool {
return b[i] == 0xEF && b[i+1] == 0xBB && b[i+2] == 0xBF
}
//
// #ifdef HAVE_CONFIG_H
// #include <config.h>
// #endif
//
// #include "./yaml.h"
//
// #include <assert.h>
// #include <limits.h>
//
// /*
// * Memory management.
// */
//
// yaml_DECLARE(void *)
// yaml_malloc(size_t size);
//
// yaml_DECLARE(void *)
// yaml_realloc(void *ptr, size_t size);
//
// yaml_DECLARE(void)
// yaml_free(void *ptr);
//
// yaml_DECLARE(yaml_char_t *)
// yaml_strdup(const yaml_char_t *);
//
// /*
// * Reader: Ensure that the buffer contains at least `length` characters.
// */
//
// yaml_DECLARE(int)
// yaml_parser_update_buffer(yaml_parser_t *parser, size_t length);
//
// /*
// * Scanner: Ensure that the token stack contains at least one token ready.
// */
//
// yaml_DECLARE(int)
// yaml_parser_fetch_more_tokens(yaml_parser_t *parser);
//
// /*
// * The size of the input raw buffer.
// */
//
// #define INPUT_RAW_BUFFER_SIZE 16384
//
// /*
// * The size of the input buffer.
// *
// * It should be possible to decode the whole raw buffer.
// */
//
// #define INPUT_BUFFER_SIZE (INPUT_RAW_BUFFER_SIZE*3)
//
// /*
// * The size of the output buffer.
// */
//
// #define OUTPUT_BUFFER_SIZE 16384
//
// /*
// * The size of the output raw buffer.
// *
// * It should be possible to encode the whole output buffer.
// */
//
// #define OUTPUT_RAW_BUFFER_SIZE (OUTPUT_BUFFER_SIZE*2+2)
//
// /*
// * The size of other stacks and queues.
// */
//
// #define INITIAL_STACK_SIZE 16
// #define INITIAL_QUEUE_SIZE 16
// #define INITIAL_STRING_SIZE 16
//
// /*
// * Buffer management.
// */
//
// #define BUFFER_INIT(context,buffer,size) \
// (((buffer).start = yaml_malloc(size)) ? \
// ((buffer).last = (buffer).pointer = (buffer).start, \
// (buffer).end = (buffer).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define BUFFER_DEL(context,buffer) \
// (yaml_free((buffer).start), \
// (buffer).start = (buffer).pointer = (buffer).end = 0)
//
// /*
// * String management.
// */
//
// typedef struct {
// yaml_char_t *start;
// yaml_char_t *end;
// yaml_char_t *pointer;
// } yaml_string_t;
//
// yaml_DECLARE(int)
// yaml_string_extend(yaml_char_t **start,
// yaml_char_t **pointer, yaml_char_t **end);
//
// yaml_DECLARE(int)
// yaml_string_join(
// yaml_char_t **a_start, yaml_char_t **a_pointer, yaml_char_t **a_end,
// yaml_char_t **b_start, yaml_char_t **b_pointer, yaml_char_t **b_end);
//
// #define NULL_STRING { NULL, NULL, NULL }
//
// #define STRING(string,length) { (string), (string)+(length), (string) }
//
// #define STRING_ASSIGN(value,string,length) \
// ((value).start = (string), \
// (value).end = (string)+(length), \
// (value).pointer = (string))
//
// #define STRING_INIT(context,string,size) \
// (((string).start = yaml_malloc(size)) ? \
// ((string).pointer = (string).start, \
// (string).end = (string).start+(size), \
// memset((string).start, 0, (size)), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define STRING_DEL(context,string) \
// (yaml_free((string).start), \
// (string).start = (string).pointer = (string).end = 0)
//
// #define STRING_EXTEND(context,string) \
// (((string).pointer+5 < (string).end) \
// || yaml_string_extend(&(string).start, \
// &(string).pointer, &(string).end))
//
// #define CLEAR(context,string) \
// ((string).pointer = (string).start, \
// memset((string).start, 0, (string).end-(string).start))
//
// #define JOIN(context,string_a,string_b) \
// ((yaml_string_join(&(string_a).start, &(string_a).pointer, \
// &(string_a).end, &(string_b).start, \
// &(string_b).pointer, &(string_b).end)) ? \
// ((string_b).pointer = (string_b).start, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// /*
// * String check operations.
// */
//
// /*
// * Check the octet at the specified position.
// */
//
// #define CHECK_AT(string,octet,offset) \
// ((string).pointer[offset] == (yaml_char_t)(octet))
//
// /*
// * Check the current octet in the buffer.
// */
//
// #define CHECK(string,octet) CHECK_AT((string),(octet),0)
//
// /*
// * Check if the character at the specified position is an alphabetical
// * character, a digit, '_', or '-'.
// */
//
// #define IS_ALPHA_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9') || \
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'Z') || \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'z') || \
// (string).pointer[offset] == '_' || \
// (string).pointer[offset] == '-')
//
// #define IS_ALPHA(string) IS_ALPHA_AT((string),0)
//
// /*
// * Check if the character at the specified position is a digit.
// */
//
// #define IS_DIGIT_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9'))
//
// #define IS_DIGIT(string) IS_DIGIT_AT((string),0)
//
// /*
// * Get the value of a digit.
// */
//
// #define AS_DIGIT_AT(string,offset) \
// ((string).pointer[offset] - (yaml_char_t) '0')
//
// #define AS_DIGIT(string) AS_DIGIT_AT((string),0)
//
// /*
// * Check if the character at the specified position is a hex-digit.
// */
//
// #define IS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) '0' && \
// (string).pointer[offset] <= (yaml_char_t) '9') || \
// ((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') || \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f'))
//
// #define IS_HEX(string) IS_HEX_AT((string),0)
//
// /*
// * Get the value of a hex-digit.
// */
//
// #define AS_HEX_AT(string,offset) \
// (((string).pointer[offset] >= (yaml_char_t) 'A' && \
// (string).pointer[offset] <= (yaml_char_t) 'F') ? \
// ((string).pointer[offset] - (yaml_char_t) 'A' + 10) : \
// ((string).pointer[offset] >= (yaml_char_t) 'a' && \
// (string).pointer[offset] <= (yaml_char_t) 'f') ? \
// ((string).pointer[offset] - (yaml_char_t) 'a' + 10) : \
// ((string).pointer[offset] - (yaml_char_t) '0'))
//
// #define AS_HEX(string) AS_HEX_AT((string),0)
//
// /*
// * Check if the character is ASCII.
// */
//
// #define IS_ASCII_AT(string,offset) \
// ((string).pointer[offset] <= (yaml_char_t) '\x7F')
//
// #define IS_ASCII(string) IS_ASCII_AT((string),0)
//
// /*
// * Check if the character can be printed unescaped.
// */
//
// #define IS_PRINTABLE_AT(string,offset) \
// (((string).pointer[offset] == 0x0A) /* . == #x0A */ \
// || ((string).pointer[offset] >= 0x20 /* #x20 <= . <= #x7E */ \
// && (string).pointer[offset] <= 0x7E) \
// || ((string).pointer[offset] == 0xC2 /* #0xA0 <= . <= #xD7FF */ \
// && (string).pointer[offset+1] >= 0xA0) \
// || ((string).pointer[offset] > 0xC2 \
// && (string).pointer[offset] < 0xED) \
// || ((string).pointer[offset] == 0xED \
// && (string).pointer[offset+1] < 0xA0) \
// || ((string).pointer[offset] == 0xEE) \
// || ((string).pointer[offset] == 0xEF /* #xE000 <= . <= #xFFFD */ \
// && !((string).pointer[offset+1] == 0xBB /* && . != #xFEFF */ \
// && (string).pointer[offset+2] == 0xBF) \
// && !((string).pointer[offset+1] == 0xBF \
// && ((string).pointer[offset+2] == 0xBE \
// || (string).pointer[offset+2] == 0xBF))))
//
// #define IS_PRINTABLE(string) IS_PRINTABLE_AT((string),0)
//
// /*
// * Check if the character at the specified position is NUL.
// */
//
// #define IS_Z_AT(string,offset) CHECK_AT((string),'\0',(offset))
//
// #define IS_Z(string) IS_Z_AT((string),0)
//
// /*
// * Check if the character at the specified position is BOM.
// */
//
// #define IS_BOM_AT(string,offset) \
// (CHECK_AT((string),'\xEF',(offset)) \
// && CHECK_AT((string),'\xBB',(offset)+1) \
// && CHECK_AT((string),'\xBF',(offset)+2)) /* BOM (#xFEFF) */
//
// #define IS_BOM(string) IS_BOM_AT(string,0)
//
// /*
// * Check if the character at the specified position is space.
// */
//
// #define IS_SPACE_AT(string,offset) CHECK_AT((string),' ',(offset))
//
// #define IS_SPACE(string) IS_SPACE_AT((string),0)
//
// /*
// * Check if the character at the specified position is tab.
// */
//
// #define IS_TAB_AT(string,offset) CHECK_AT((string),'\t',(offset))
//
// #define IS_TAB(string) IS_TAB_AT((string),0)
//
// /*
// * Check if the character at the specified position is blank (space or tab).
// */
//
// #define IS_BLANK_AT(string,offset) \
// (IS_SPACE_AT((string),(offset)) || IS_TAB_AT((string),(offset)))
//
// #define IS_BLANK(string) IS_BLANK_AT((string),0)
//
// /*
// * Check if the character at the specified position is a line break.
// */
//
// #define IS_BREAK_AT(string,offset) \
// (CHECK_AT((string),'\r',(offset)) /* CR (#xD)*/ \
// || CHECK_AT((string),'\n',(offset)) /* LF (#xA) */ \
// || (CHECK_AT((string),'\xC2',(offset)) \
// && CHECK_AT((string),'\x85',(offset)+1)) /* NEL (#x85) */ \
// || (CHECK_AT((string),'\xE2',(offset)) \
// && CHECK_AT((string),'\x80',(offset)+1) \
// && CHECK_AT((string),'\xA8',(offset)+2)) /* LS (#x2028) */ \
// || (CHECK_AT((string),'\xE2',(offset)) \
// && CHECK_AT((string),'\x80',(offset)+1) \
// && CHECK_AT((string),'\xA9',(offset)+2))) /* PS (#x2029) */
//
// #define IS_BREAK(string) IS_BREAK_AT((string),0)
//
// #define IS_CRLF_AT(string,offset) \
// (CHECK_AT((string),'\r',(offset)) && CHECK_AT((string),'\n',(offset)+1))
//
// #define IS_CRLF(string) IS_CRLF_AT((string),0)
//
// /*
// * Check if the character is a line break or NUL.
// */
//
// #define IS_BREAKZ_AT(string,offset) \
// (IS_BREAK_AT((string),(offset)) || IS_Z_AT((string),(offset)))
//
// #define IS_BREAKZ(string) IS_BREAKZ_AT((string),0)
//
// /*
// * Check if the character is a line break, space, or NUL.
// */
//
// #define IS_SPACEZ_AT(string,offset) \
// (IS_SPACE_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
//
// #define IS_SPACEZ(string) IS_SPACEZ_AT((string),0)
//
// /*
// * Check if the character is a line break, space, tab, or NUL.
// */
//
// #define IS_BLANKZ_AT(string,offset) \
// (IS_BLANK_AT((string),(offset)) || IS_BREAKZ_AT((string),(offset)))
//
// #define IS_BLANKZ(string) IS_BLANKZ_AT((string),0)
//
// /*
// * Determine the width of the character.
// */
//
// #define WIDTH_AT(string,offset) \
// (((string).pointer[offset] & 0x80) == 0x00 ? 1 : \
// ((string).pointer[offset] & 0xE0) == 0xC0 ? 2 : \
// ((string).pointer[offset] & 0xF0) == 0xE0 ? 3 : \
// ((string).pointer[offset] & 0xF8) == 0xF0 ? 4 : 0)
//
// #define WIDTH(string) WIDTH_AT((string),0)
//
// /*
// * Move the string pointer to the next character.
// */
//
// #define MOVE(string) ((string).pointer += WIDTH((string)))
//
// /*
// * Copy a character and move the pointers of both strings.
// */
//
// #define COPY(string_a,string_b) \
// ((*(string_b).pointer & 0x80) == 0x00 ? \
// (*((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xE0) == 0xC0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xF0) == 0xE0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : \
// (*(string_b).pointer & 0xF8) == 0xF0 ? \
// (*((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++), \
// *((string_a).pointer++) = *((string_b).pointer++)) : 0)
//
// /*
// * Stack and queue management.
// */
//
// yaml_DECLARE(int)
// yaml_stack_extend(void **start, void **top, void **end);
//
// yaml_DECLARE(int)
// yaml_queue_extend(void **start, void **head, void **tail, void **end);
//
// #define STACK_INIT(context,stack,size) \
// (((stack).start = yaml_malloc((size)*sizeof(*(stack).start))) ? \
// ((stack).top = (stack).start, \
// (stack).end = (stack).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define STACK_DEL(context,stack) \
// (yaml_free((stack).start), \
// (stack).start = (stack).top = (stack).end = 0)
//
// #define STACK_EMPTY(context,stack) \
// ((stack).start == (stack).top)
//
// #define PUSH(context,stack,value) \
// (((stack).top != (stack).end \
// || yaml_stack_extend((void **)&(stack).start, \
// (void **)&(stack).top, (void **)&(stack).end)) ? \
// (*((stack).top++) = value, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define POP(context,stack) \
// (*(--(stack).top))
//
// #define QUEUE_INIT(context,queue,size) \
// (((queue).start = yaml_malloc((size)*sizeof(*(queue).start))) ? \
// ((queue).head = (queue).tail = (queue).start, \
// (queue).end = (queue).start+(size), \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define QUEUE_DEL(context,queue) \
// (yaml_free((queue).start), \
// (queue).start = (queue).head = (queue).tail = (queue).end = 0)
//
// #define QUEUE_EMPTY(context,queue) \
// ((queue).head == (queue).tail)
//
// #define ENQUEUE(context,queue,value) \
// (((queue).tail != (queue).end \
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
// (*((queue).tail++) = value, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// #define DEQUEUE(context,queue) \
// (*((queue).head++))
//
// #define QUEUE_INSERT(context,queue,index,value) \
// (((queue).tail != (queue).end \
// || yaml_queue_extend((void **)&(queue).start, (void **)&(queue).head, \
// (void **)&(queue).tail, (void **)&(queue).end)) ? \
// (memmove((queue).head+(index)+1,(queue).head+(index), \
// ((queue).tail-(queue).head-(index))*sizeof(*(queue).start)), \
// *((queue).head+(index)) = value, \
// (queue).tail++, \
// 1) : \
// ((context)->error = yaml_MEMORY_ERROR, \
// 0))
//
// /*
// * Token initializers.
// */
//
// #define TOKEN_INIT(token,token_type,token_start_mark,token_end_mark) \
// (memset(&(token), 0, sizeof(yaml_token_t)), \
// (token).type = (token_type), \
// (token).start_mark = (token_start_mark), \
// (token).end_mark = (token_end_mark))
//
// #define STREAM_START_TOKEN_INIT(token,token_encoding,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_STREAM_START_TOKEN,(start_mark),(end_mark)), \
// (token).data.stream_start.encoding = (token_encoding))
//
// #define STREAM_END_TOKEN_INIT(token,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_STREAM_END_TOKEN,(start_mark),(end_mark)))
//
// #define ALIAS_TOKEN_INIT(token,token_value,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_ALIAS_TOKEN,(start_mark),(end_mark)), \
// (token).data.alias.value = (token_value))
//
// #define ANCHOR_TOKEN_INIT(token,token_value,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_ANCHOR_TOKEN,(start_mark),(end_mark)), \
// (token).data.anchor.value = (token_value))
//
// #define TAG_TOKEN_INIT(token,token_handle,token_suffix,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_TAG_TOKEN,(start_mark),(end_mark)), \
// (token).data.tag.handle = (token_handle), \
// (token).data.tag.suffix = (token_suffix))
//
// #define SCALAR_TOKEN_INIT(token,token_value,token_length,token_style,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_SCALAR_TOKEN,(start_mark),(end_mark)), \
// (token).data.scalar.value = (token_value), \
// (token).data.scalar.length = (token_length), \
// (token).data.scalar.style = (token_style))
//
// #define VERSION_DIRECTIVE_TOKEN_INIT(token,token_major,token_minor,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_VERSION_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
// (token).data.version_directive.major = (token_major), \
// (token).data.version_directive.minor = (token_minor))
//
// #define TAG_DIRECTIVE_TOKEN_INIT(token,token_handle,token_prefix,start_mark,end_mark) \
// (TOKEN_INIT((token),yaml_TAG_DIRECTIVE_TOKEN,(start_mark),(end_mark)), \
// (token).data.tag_directive.handle = (token_handle), \
// (token).data.tag_directive.prefix = (token_prefix))
//
// /*
// * Event initializers.
// */
//
// #define EVENT_INIT(event,event_type,event_start_mark,event_end_mark) \
// (memset(&(event), 0, sizeof(yaml_event_t)), \
// (event).type = (event_type), \
// (event).start_mark = (event_start_mark), \
// (event).end_mark = (event_end_mark))
//
// #define STREAM_START_EVENT_INIT(event,event_encoding,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_STREAM_START_EVENT,(start_mark),(end_mark)), \
// (event).data.stream_start.encoding = (event_encoding))
//
// #define STREAM_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_STREAM_END_EVENT,(start_mark),(end_mark)))
//
// #define DOCUMENT_START_EVENT_INIT(event,event_version_directive, \
// event_tag_directives_start,event_tag_directives_end,event_implicit,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_DOCUMENT_START_EVENT,(start_mark),(end_mark)), \
// (event).data.document_start.version_directive = (event_version_directive), \
// (event).data.document_start.tag_directives.start = (event_tag_directives_start), \
// (event).data.document_start.tag_directives.end = (event_tag_directives_end), \
// (event).data.document_start.implicit = (event_implicit))
//
// #define DOCUMENT_END_EVENT_INIT(event,event_implicit,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_DOCUMENT_END_EVENT,(start_mark),(end_mark)), \
// (event).data.document_end.implicit = (event_implicit))
//
// #define ALIAS_EVENT_INIT(event,event_anchor,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_ALIAS_EVENT,(start_mark),(end_mark)), \
// (event).data.alias.anchor = (event_anchor))
//
// #define SCALAR_EVENT_INIT(event,event_anchor,event_tag,event_value,event_length, \
// event_plain_implicit, event_quoted_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SCALAR_EVENT,(start_mark),(end_mark)), \
// (event).data.scalar.anchor = (event_anchor), \
// (event).data.scalar.tag = (event_tag), \
// (event).data.scalar.value = (event_value), \
// (event).data.scalar.length = (event_length), \
// (event).data.scalar.plain_implicit = (event_plain_implicit), \
// (event).data.scalar.quoted_implicit = (event_quoted_implicit), \
// (event).data.scalar.style = (event_style))
//
// #define SEQUENCE_START_EVENT_INIT(event,event_anchor,event_tag, \
// event_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SEQUENCE_START_EVENT,(start_mark),(end_mark)), \
// (event).data.sequence_start.anchor = (event_anchor), \
// (event).data.sequence_start.tag = (event_tag), \
// (event).data.sequence_start.implicit = (event_implicit), \
// (event).data.sequence_start.style = (event_style))
//
// #define SEQUENCE_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_SEQUENCE_END_EVENT,(start_mark),(end_mark)))
//
// #define MAPPING_START_EVENT_INIT(event,event_anchor,event_tag, \
// event_implicit,event_style,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_MAPPING_START_EVENT,(start_mark),(end_mark)), \
// (event).data.mapping_start.anchor = (event_anchor), \
// (event).data.mapping_start.tag = (event_tag), \
// (event).data.mapping_start.implicit = (event_implicit), \
// (event).data.mapping_start.style = (event_style))
//
// #define MAPPING_END_EVENT_INIT(event,start_mark,end_mark) \
// (EVENT_INIT((event),yaml_MAPPING_END_EVENT,(start_mark),(end_mark)))
//
// /*
// * Document initializer.
// */
//
// #define DOCUMENT_INIT(document,document_nodes_start,document_nodes_end, \
// document_version_directive,document_tag_directives_start, \
// document_tag_directives_end,document_start_implicit, \
// document_end_implicit,document_start_mark,document_end_mark) \
// (memset(&(document), 0, sizeof(yaml_document_t)), \
// (document).nodes.start = (document_nodes_start), \
// (document).nodes.end = (document_nodes_end), \
// (document).nodes.top = (document_nodes_start), \
// (document).version_directive = (document_version_directive), \
// (document).tag_directives.start = (document_tag_directives_start), \
// (document).tag_directives.end = (document_tag_directives_end), \
// (document).start_implicit = (document_start_implicit), \
// (document).end_implicit = (document_end_implicit), \
// (document).start_mark = (document_start_mark), \
// (document).end_mark = (document_end_mark))
//
// /*
// * Node initializers.
// */
//
// #define NODE_INIT(node,node_type,node_tag,node_start_mark,node_end_mark) \
// (memset(&(node), 0, sizeof(yaml_node_t)), \
// (node).type = (node_type), \
// (node).tag = (node_tag), \
// (node).start_mark = (node_start_mark), \
// (node).end_mark = (node_end_mark))
//
// #define SCALAR_NODE_INIT(node,node_tag,node_value,node_length, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_SCALAR_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.scalar.value = (node_value), \
// (node).data.scalar.length = (node_length), \
// (node).data.scalar.style = (node_style))
//
// #define SEQUENCE_NODE_INIT(node,node_tag,node_items_start,node_items_end, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_SEQUENCE_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.sequence.items.start = (node_items_start), \
// (node).data.sequence.items.end = (node_items_end), \
// (node).data.sequence.items.top = (node_items_start), \
// (node).data.sequence.style = (node_style))
//
// #define MAPPING_NODE_INIT(node,node_tag,node_pairs_start,node_pairs_end, \
// node_style,start_mark,end_mark) \
// (NODE_INIT((node),yaml_MAPPING_NODE,(node_tag),(start_mark),(end_mark)), \
// (node).data.mapping.pairs.start = (node_pairs_start), \
// (node).data.mapping.pairs.end = (node_pairs_end), \
// (node).data.mapping.pairs.top = (node_pairs_start), \
// (node).data.mapping.style = (node_style))
//


@@ -1,953 +0,0 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package candiedyaml
import (
"fmt"
"io"
)
/** The version directive data. */
type yaml_version_directive_t struct {
major int // The major version number
minor int // The minor version number
}
/** The tag directive data. */
type yaml_tag_directive_t struct {
handle []byte // The tag handle
prefix []byte // The tag prefix
}
/** The stream encoding. */
type yaml_encoding_t int
const (
/** Let the parser choose the encoding. */
yaml_ANY_ENCODING yaml_encoding_t = iota
/** The default UTF-8 encoding. */
yaml_UTF8_ENCODING
/** The UTF-16-LE encoding with BOM. */
yaml_UTF16LE_ENCODING
/** The UTF-16-BE encoding with BOM. */
yaml_UTF16BE_ENCODING
)
/** Line break types. */
type yaml_break_t int
const (
yaml_ANY_BREAK yaml_break_t = iota /** Let the parser choose the break type. */
yaml_CR_BREAK /** Use CR for line breaks (Mac style). */
yaml_LN_BREAK /** Use LN for line breaks (Unix style). */
yaml_CRLN_BREAK /** Use CR LN for line breaks (DOS style). */
)
/** Many bad things could happen with the parser and emitter. */
type YAML_error_type_t int
const (
/** No error is produced. */
yaml_NO_ERROR YAML_error_type_t = iota
/** Cannot allocate or reallocate a block of memory. */
yaml_MEMORY_ERROR
/** Cannot read or decode the input stream. */
yaml_READER_ERROR
/** Cannot scan the input stream. */
yaml_SCANNER_ERROR
/** Cannot parse the input stream. */
yaml_PARSER_ERROR
/** Cannot compose a YAML document. */
yaml_COMPOSER_ERROR
/** Cannot write to the output stream. */
yaml_WRITER_ERROR
/** Cannot emit a YAML stream. */
yaml_EMITTER_ERROR
)
/** The pointer position. */
type YAML_mark_t struct {
/** The position index. */
index int
/** The position line. */
line int
/** The position column. */
column int
}
func (m YAML_mark_t) String() string {
return fmt.Sprintf("line %d, column %d", m.line, m.column)
}
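// Illustrative example (not in the original source):
//
//	m := YAML_mark_t{index: 42, line: 3, column: 7}
//	m.String() // "line 3, column 7"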
/** @} */
/**
* @defgroup styles Node Styles
* @{
*/
type yaml_style_t int
/** Scalar styles. */
type yaml_scalar_style_t yaml_style_t
const (
/** Let the emitter choose the style. */
yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
/** The plain scalar style. */
yaml_PLAIN_SCALAR_STYLE
/** The single-quoted scalar style. */
yaml_SINGLE_QUOTED_SCALAR_STYLE
/** The double-quoted scalar style. */
yaml_DOUBLE_QUOTED_SCALAR_STYLE
/** The literal scalar style. */
yaml_LITERAL_SCALAR_STYLE
/** The folded scalar style. */
yaml_FOLDED_SCALAR_STYLE
)
/** Sequence styles. */
type yaml_sequence_style_t yaml_style_t
const (
/** Let the emitter choose the style. */
yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
/** The block sequence style. */
yaml_BLOCK_SEQUENCE_STYLE
/** The flow sequence style. */
yaml_FLOW_SEQUENCE_STYLE
)
/** Mapping styles. */
type yaml_mapping_style_t yaml_style_t
const (
/** Let the emitter choose the style. */
yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
/** The block mapping style. */
yaml_BLOCK_MAPPING_STYLE
/** The flow mapping style. */
yaml_FLOW_MAPPING_STYLE
/* yaml_FLOW_SET_MAPPING_STYLE */
)
/** @} */
/**
* @defgroup tokens Tokens
* @{
*/
/** Token types. */
type yaml_token_type_t int
const (
/** An empty token. */
yaml_NO_TOKEN yaml_token_type_t = iota
/** A STREAM-START token. */
yaml_STREAM_START_TOKEN
/** A STREAM-END token. */
yaml_STREAM_END_TOKEN
/** A VERSION-DIRECTIVE token. */
yaml_VERSION_DIRECTIVE_TOKEN
/** A TAG-DIRECTIVE token. */
yaml_TAG_DIRECTIVE_TOKEN
/** A DOCUMENT-START token. */
yaml_DOCUMENT_START_TOKEN
/** A DOCUMENT-END token. */
yaml_DOCUMENT_END_TOKEN
/** A BLOCK-SEQUENCE-START token. */
yaml_BLOCK_SEQUENCE_START_TOKEN
/** A BLOCK-MAPPING-START token. */
yaml_BLOCK_MAPPING_START_TOKEN
/** A BLOCK-END token. */
yaml_BLOCK_END_TOKEN
/** A FLOW-SEQUENCE-START token. */
yaml_FLOW_SEQUENCE_START_TOKEN
/** A FLOW-SEQUENCE-END token. */
yaml_FLOW_SEQUENCE_END_TOKEN
/** A FLOW-MAPPING-START token. */
yaml_FLOW_MAPPING_START_TOKEN
/** A FLOW-MAPPING-END token. */
yaml_FLOW_MAPPING_END_TOKEN
/** A BLOCK-ENTRY token. */
yaml_BLOCK_ENTRY_TOKEN
/** A FLOW-ENTRY token. */
yaml_FLOW_ENTRY_TOKEN
/** A KEY token. */
yaml_KEY_TOKEN
/** A VALUE token. */
yaml_VALUE_TOKEN
/** An ALIAS token. */
yaml_ALIAS_TOKEN
/** An ANCHOR token. */
yaml_ANCHOR_TOKEN
/** A TAG token. */
yaml_TAG_TOKEN
/** A SCALAR token. */
yaml_SCALAR_TOKEN
)
/** The token structure. */
type yaml_token_t struct {
/** The token type. */
token_type yaml_token_type_t
/** The token data. */
/** The stream start (for @c yaml_STREAM_START_TOKEN). */
encoding yaml_encoding_t
/** The alias (for @c yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN). */
/** The anchor (for @c ). */
/** The scalar value (for @c ). */
value []byte
/** The tag suffix. */
suffix []byte
/** The scalar value (for @c yaml_SCALAR_TOKEN). */
/** The scalar style. */
style yaml_scalar_style_t
/** The version directive (for @c yaml_VERSION_DIRECTIVE_TOKEN). */
version_directive yaml_version_directive_t
/** The tag directive (for @c yaml_TAG_DIRECTIVE_TOKEN). */
prefix []byte
/** The beginning of the token. */
start_mark YAML_mark_t
/** The end of the token. */
end_mark YAML_mark_t
major, minor int
}
/**
* @defgroup events Events
* @{
*/
/** Event types. */
type yaml_event_type_t int
const (
/** An empty event. */
yaml_NO_EVENT yaml_event_type_t = iota
/** A STREAM-START event. */
yaml_STREAM_START_EVENT
/** A STREAM-END event. */
yaml_STREAM_END_EVENT
/** A DOCUMENT-START event. */
yaml_DOCUMENT_START_EVENT
/** A DOCUMENT-END event. */
yaml_DOCUMENT_END_EVENT
/** An ALIAS event. */
yaml_ALIAS_EVENT
/** A SCALAR event. */
yaml_SCALAR_EVENT
/** A SEQUENCE-START event. */
yaml_SEQUENCE_START_EVENT
/** A SEQUENCE-END event. */
yaml_SEQUENCE_END_EVENT
/** A MAPPING-START event. */
yaml_MAPPING_START_EVENT
/** A MAPPING-END event. */
yaml_MAPPING_END_EVENT
)
/** The event structure. */
type yaml_event_t struct {
/** The event type. */
event_type yaml_event_type_t
/** The stream parameters (for @c yaml_STREAM_START_EVENT). */
encoding yaml_encoding_t
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT). */
version_directive *yaml_version_directive_t
/** The beginning and end of the tag directives list. */
tag_directives []yaml_tag_directive_t
/** The document parameters (for @c yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** Is the document indicator implicit? */
implicit bool
/** The alias parameters (for @c yaml_ALIAS_EVENT, yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** The anchor. */
anchor []byte
/** The scalar parameters (for @c yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** The tag. */
tag []byte
/** The scalar value. */
value []byte
/** Is the tag optional for the plain style? */
plain_implicit bool
/** Is the tag optional for any non-plain style? */
quoted_implicit bool
/** The sequence parameters (for @c yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT). */
/** The sequence style. */
/** The scalar style. */
style yaml_style_t
/** The beginning of the event. */
start_mark, end_mark YAML_mark_t
}
/**
* @defgroup nodes Nodes
* @{
*/
const (
/** The tag @c !!null with the only possible value: @c null. */
yaml_NULL_TAG = "tag:yaml.org,2002:null"
/** The tag @c !!bool with the values: @c true and @c false. */
yaml_BOOL_TAG = "tag:yaml.org,2002:bool"
/** The tag @c !!str for string values. */
yaml_STR_TAG = "tag:yaml.org,2002:str"
/** The tag @c !!int for integer values. */
yaml_INT_TAG = "tag:yaml.org,2002:int"
/** The tag @c !!float for float values. */
yaml_FLOAT_TAG = "tag:yaml.org,2002:float"
/** The tag @c !!timestamp for date and time values. */
yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp"
/** The tag @c !!seq is used to denote sequences. */
yaml_SEQ_TAG = "tag:yaml.org,2002:seq"
/** The tag @c !!map is used to denote mapping. */
yaml_MAP_TAG = "tag:yaml.org,2002:map"
/** The default scalar tag is @c !!str. */
yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG
/** The default sequence tag is @c !!seq. */
yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG
/** The default mapping tag is @c !!map. */
yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG
yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
)
/** Node types. */
type yaml_node_type_t int
const (
/** An empty node. */
yaml_NO_NODE yaml_node_type_t = iota
/** A scalar node. */
yaml_SCALAR_NODE
/** A sequence node. */
yaml_SEQUENCE_NODE
/** A mapping node. */
yaml_MAPPING_NODE
)
/** An element of a sequence node. */
type yaml_node_item_t int
/** An element of a mapping node. */
type yaml_node_pair_t struct {
/** The key of the element. */
key int
/** The value of the element. */
value int
}
/** The node structure. */
type yaml_node_t struct {
/** The node type. */
node_type yaml_node_type_t
/** The node tag. */
tag []byte
/** The scalar parameters (for @c yaml_SCALAR_NODE). */
scalar struct {
/** The scalar value. */
value []byte
/** The scalar style. */
style yaml_scalar_style_t
}
/** The sequence parameters (for @c yaml_SEQUENCE_NODE). */
sequence struct {
/** The stack of sequence items. */
items []yaml_node_item_t
/** The sequence style. */
style yaml_sequence_style_t
}
/** The mapping parameters (for @c yaml_MAPPING_NODE). */
mapping struct {
/** The stack of mapping pairs (key, value). */
pairs []yaml_node_pair_t
/** The mapping style. */
style yaml_mapping_style_t
}
/** The beginning of the node. */
start_mark YAML_mark_t
/** The end of the node. */
end_mark YAML_mark_t
}
/** The document structure. */
type yaml_document_t struct {
/** The document nodes. */
nodes []yaml_node_t
/** The version directive. */
version_directive *yaml_version_directive_t
/** The list of tag directives. */
tags []yaml_tag_directive_t
/** Is the document start indicator implicit? */
start_implicit bool
/** Is the document end indicator implicit? */
end_implicit bool
/** The beginning of the document. */
start_mark YAML_mark_t
/** The end of the document. */
end_mark YAML_mark_t
}
/**
* The prototype of a read handler.
*
* The read handler is called when the parser needs to read more bytes from the
* source. The handler should write not more than @a size bytes to the @a
* buffer. The number of written bytes should be set to the @a length variable.
*
* @param[in,out] data A pointer to an application data specified by
* yaml_parser_set_input().
* @param[out] buffer The buffer to write the data from the source.
* @param[in] size The size of the buffer.
* @param[out] size_read The actual number of bytes read from the source.
*
* @returns On success, the handler should return @c 1. If the handler failed,
* the returned value should be @c 0. On EOF, the handler should set the
* @a size_read to @c 0 and return @c 1.
*/
type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
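// Illustrative sketch (not in the original source): a handler that drains the
// parser's io.Reader, along the lines of what yaml_parser_set_input_reader
// wires up; the treatment of io.EOF here is an assumption.
//
//	func readerHandler(p *yaml_parser_t, buffer []byte) (int, error) {
//		return p.input_reader.Read(buffer) // io.EOF is assumed to signal end of stream
//	}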
/**
* This structure holds information about a potential simple key.
*/
type yaml_simple_key_t struct {
/** Is a simple key possible? */
possible bool
/** Is a simple key required? */
required bool
/** The number of the token. */
token_number int
/** The position mark. */
mark YAML_mark_t
}
/**
* The states of the parser.
*/
type yaml_parser_state_t int
const (
/** Expect STREAM-START. */
yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
/** Expect the beginning of an implicit document. */
yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
/** Expect DOCUMENT-START. */
yaml_PARSE_DOCUMENT_START_STATE
/** Expect the content of a document. */
yaml_PARSE_DOCUMENT_CONTENT_STATE
/** Expect DOCUMENT-END. */
yaml_PARSE_DOCUMENT_END_STATE
/** Expect a block node. */
yaml_PARSE_BLOCK_NODE_STATE
/** Expect a block node or indentless sequence. */
yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE
/** Expect a flow node. */
yaml_PARSE_FLOW_NODE_STATE
/** Expect the first entry of a block sequence. */
yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
/** Expect an entry of a block sequence. */
yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
/** Expect an entry of an indentless sequence. */
yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
/** Expect the first key of a block mapping. */
yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
/** Expect a block mapping key. */
yaml_PARSE_BLOCK_MAPPING_KEY_STATE
/** Expect a block mapping value. */
yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
/** Expect the first entry of a flow sequence. */
yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
/** Expect an entry of a flow sequence. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
/** Expect a key of an ordered mapping. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
/** Expect a value of an ordered mapping. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
/** Expect the end of an ordered mapping entry. */
yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
/** Expect the first key of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
/** Expect a key of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_KEY_STATE
/** Expect a value of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_VALUE_STATE
/** Expect an empty value of a flow mapping. */
yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE
/** Expect nothing. */
yaml_PARSE_END_STATE
)
/**
* This structure holds aliases data.
*/
type yaml_alias_data_t struct {
/** The anchor. */
anchor []byte
/** The node id. */
index int
/** The anchor mark. */
mark YAML_mark_t
}
/**
* The parser structure.
*
* All members are internal. Manage the structure using the @c yaml_parser_
* family of functions.
*/
type yaml_parser_t struct {
/**
* @name Error handling
* @{
*/
/** Error type. */
error YAML_error_type_t
/** Error description. */
problem string
/** The byte about which the problem occurred. */
problem_offset int
/** The problematic value (@c -1 is none). */
problem_value int
/** The problem position. */
problem_mark YAML_mark_t
/** The error context. */
context string
/** The context position. */
context_mark YAML_mark_t
/**
* @}
*/
/**
* @name Reader stuff
* @{
*/
/** Read handler. */
read_handler yaml_read_handler_t
/** Reader input data. */
input_reader io.Reader
input []byte
input_pos int
/** EOF flag */
eof bool
/** The working buffer. */
buffer []byte
buffer_pos int
/* The number of unread characters in the buffer. */
unread int
/** The raw buffer. */
raw_buffer []byte
raw_buffer_pos int
/** The input encoding. */
encoding yaml_encoding_t
/** The offset of the current position (in bytes). */
offset int
/** The mark of the current position. */
mark YAML_mark_t
/**
* @}
*/
/**
* @name Scanner stuff
* @{
*/
/** Have we started to scan the input stream? */
stream_start_produced bool
/** Have we reached the end of the input stream? */
stream_end_produced bool
/** The number of unclosed '[' and '{' indicators. */
flow_level int
/** The tokens queue. */
tokens []yaml_token_t
tokens_head int
/** The number of tokens fetched from the queue. */
tokens_parsed int
/* Does the tokens queue contain a token ready for dequeueing. */
token_available bool
/** The indentation levels stack. */
indents []int
/** The current indentation level. */
indent int
/** May a simple key occur at the current position? */
simple_key_allowed bool
/** The stack of simple keys. */
simple_keys []yaml_simple_key_t
/**
* @}
*/
/**
* @name Parser stuff
* @{
*/
/** The parser states stack. */
states []yaml_parser_state_t
/** The current parser state. */
state yaml_parser_state_t
/** The stack of marks. */
marks []YAML_mark_t
/** The list of TAG directives. */
tag_directives []yaml_tag_directive_t
/**
* @}
*/
/**
* @name Dumper stuff
* @{
*/
/** The alias data. */
aliases []yaml_alias_data_t
/** The currently parsed document. */
document *yaml_document_t
/**
* @}
*/
}
/**
* The prototype of a write handler.
*
* The write handler is called when the emitter needs to flush the accumulated
* characters to the output. The handler should write @a size bytes of the
* @a buffer to the output.
*
* @param[in,out] data A pointer to an application data specified by
* yaml_emitter_set_output().
* @param[in] buffer The buffer with bytes to be written.
* @param[in] size The size of the buffer.
*
* @returns On success, the handler should return @c 1. If the handler failed,
* the returned value should be @c 0.
*/
type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
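// Illustrative sketch (not in the original source): a handler that flushes the
// accumulated bytes to the emitter's io.Writer.
//
//	func writerHandler(e *yaml_emitter_t, buffer []byte) error {
//		_, err := e.output_writer.Write(buffer)
//		return err
//	}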
/** The emitter states. */
type yaml_emitter_state_t int
const (
/** Expect STREAM-START. */
yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
/** Expect the first DOCUMENT-START or STREAM-END. */
yaml_EMIT_FIRST_DOCUMENT_START_STATE
/** Expect DOCUMENT-START or STREAM-END. */
yaml_EMIT_DOCUMENT_START_STATE
/** Expect the content of a document. */
yaml_EMIT_DOCUMENT_CONTENT_STATE
/** Expect DOCUMENT-END. */
yaml_EMIT_DOCUMENT_END_STATE
/** Expect the first item of a flow sequence. */
yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
/** Expect an item of a flow sequence. */
yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE
/** Expect the first key of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
/** Expect a key of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_KEY_STATE
/** Expect a value for a simple key of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE
/** Expect a value of a flow mapping. */
yaml_EMIT_FLOW_MAPPING_VALUE_STATE
/** Expect the first item of a block sequence. */
yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
/** Expect an item of a block sequence. */
yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE
/** Expect the first key of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
/** Expect the key of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_KEY_STATE
/** Expect a value for a simple key of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE
/** Expect a value of a block mapping. */
yaml_EMIT_BLOCK_MAPPING_VALUE_STATE
/** Expect nothing. */
yaml_EMIT_END_STATE
)
/**
* The emitter structure.
*
* All members are internal. Manage the structure using the @c yaml_emitter_
* family of functions.
*/
type yaml_emitter_t struct {
/**
* @name Error handling
* @{
*/
/** Error type. */
error YAML_error_type_t
/** Error description. */
problem string
/**
* @}
*/
/**
* @name Writer stuff
* @{
*/
/** Write handler. */
write_handler yaml_write_handler_t
/** Standard (string or file) output data. */
output_buffer *[]byte
output_writer io.Writer
/** The working buffer. */
buffer []byte
buffer_pos int
/** The raw buffer. */
raw_buffer []byte
raw_buffer_pos int
/** The stream encoding. */
encoding yaml_encoding_t
/**
* @}
*/
/**
* @name Emitter stuff
* @{
*/
/** If the output is in the canonical style? */
canonical bool
/** The number of indentation spaces. */
best_indent int
/** The preferred width of the output lines. */
best_width int
/** Allow unescaped non-ASCII characters? */
unicode bool
/** The preferred line break. */
line_break yaml_break_t
/** The stack of states. */
states []yaml_emitter_state_t
/** The current emitter state. */
state yaml_emitter_state_t
/** The event queue. */
events []yaml_event_t
events_head int
/** The stack of indentation levels. */
indents []int
/** The list of tag directives. */
tag_directives []yaml_tag_directive_t
/** The current indentation level. */
indent int
/** The current flow level. */
flow_level int
/** Is it the document root context? */
root_context bool
/** Is it a sequence context? */
sequence_context bool
/** Is it a mapping context? */
mapping_context bool
/** Is it a simple mapping key context? */
simple_key_context bool
/** The current line. */
line int
/** The current column. */
column int
/** If the last character was a whitespace? */
whitespace bool
/** If the last character was an indentation character (' ', '-', '?', ':')? */
indention bool
/** If an explicit document end is required? */
open_ended bool
/** Anchor analysis. */
anchor_data struct {
/** The anchor value. */
anchor []byte
/** Is it an alias? */
alias bool
}
/** Tag analysis. */
tag_data struct {
/** The tag handle. */
handle []byte
/** The tag suffix. */
suffix []byte
}
/** Scalar analysis. */
scalar_data struct {
/** The scalar value. */
value []byte
/** Does the scalar contain line breaks? */
multiline bool
/** Can the scalar be expressed in the flow plain style? */
flow_plain_allowed bool
/** Can the scalar be expressed in the block plain style? */
block_plain_allowed bool
/** Can the scalar be expressed in the single quoted style? */
single_quoted_allowed bool
/** Can the scalar be expressed in the literal or folded styles? */
block_allowed bool
/** The output style. */
style yaml_scalar_style_t
}
/**
* @}
*/
/**
* @name Dumper stuff
* @{
*/
/** If the stream was already opened? */
opened bool
/** If the stream was already closed? */
closed bool
/** The information associated with the document nodes. */
anchors *struct {
/** The number of references. */
references int
/** The anchor id. */
anchor int
/** If the node has been emitted? */
serialized bool
}
/** The last assigned anchor id. */
last_anchor_id int
/** The currently emitted document. */
document *yaml_document_t
/**
* @}
*/
}


@@ -22,7 +22,6 @@ import (
"net"
"net/http"
"net/url"
"reflect"
"sort"
"strconv"
"sync"
@@ -261,53 +260,67 @@ type httpClusterClient struct {
selectionMode EndpointSelectionMode
}
func (c *httpClusterClient) getLeaderEndpoint() (string, error) {
mAPI := NewMembersAPI(c)
leader, err := mAPI.Leader(context.Background())
func (c *httpClusterClient) getLeaderEndpoint(ctx context.Context, eps []url.URL) (string, error) {
ceps := make([]url.URL, len(eps))
copy(ceps, eps)
// To perform a lookup on the new endpoint list without using the current
// client, we'll copy it
clientCopy := &httpClusterClient{
clientFactory: c.clientFactory,
credentials: c.credentials,
rand: c.rand,
pinned: 0,
endpoints: ceps,
}
mAPI := NewMembersAPI(clientCopy)
leader, err := mAPI.Leader(ctx)
if err != nil {
return "", err
}
if len(leader.ClientURLs) == 0 {
return "", ErrNoLeaderEndpoint
}
return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs?
}
func (c *httpClusterClient) SetEndpoints(eps []string) error {
func (c *httpClusterClient) parseEndpoints(eps []string) ([]url.URL, error) {
if len(eps) == 0 {
return ErrNoEndpoints
return []url.URL{}, ErrNoEndpoints
}
neps := make([]url.URL, len(eps))
for i, ep := range eps {
u, err := url.Parse(ep)
if err != nil {
return err
return []url.URL{}, err
}
neps[i] = *u
}
return neps, nil
}
switch c.selectionMode {
case EndpointSelectionRandom:
c.endpoints = shuffleEndpoints(c.rand, neps)
c.pinned = 0
case EndpointSelectionPrioritizeLeader:
c.endpoints = neps
lep, err := c.getLeaderEndpoint()
if err != nil {
return ErrNoLeaderEndpoint
}
for i := range c.endpoints {
if c.endpoints[i].String() == lep {
c.pinned = i
break
}
}
// If the endpoint list doesn't have the leader URL, just keep c.pinned = 0.
// Forwarding between follower and leader would be required but it works.
default:
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
func (c *httpClusterClient) SetEndpoints(eps []string) error {
neps, err := c.parseEndpoints(eps)
if err != nil {
return err
}
c.Lock()
defer c.Unlock()
c.endpoints = shuffleEndpoints(c.rand, neps)
// We're not doing anything for PrioritizeLeader here, because without a
// context we can't call getLeaderEndpoint. However, if you're using
// PrioritizeLeader you've already been told to call Sync regularly, and
// there we do have a ctx and can figure out the leader. PrioritizeLeader
// is also quite a loose guarantee, so deal with it.
c.pinned = 0
return nil
}
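With PrioritizeLeader the client now re-pins the leader only inside Sync, so callers are expected to keep a sync loop running. A minimal sketch, assuming the etcd v2 client API as it appears in this diff (endpoint URLs are illustrative):
```go
package main

import (
	"log"
	"time"

	"github.com/coreos/etcd/client"
	"golang.org/x/net/context"
)

func main() {
	cfg := client.Config{
		Endpoints:     []string{"http://10.0.0.1:2379", "http://10.0.0.2:2379"},
		SelectionMode: client.EndpointSelectionPrioritizeLeader,
	}
	c, err := client.New(cfg)
	if err != nil {
		log.Fatal(err)
	}
	// AutoSync calls Sync on an interval; Sync has a context, so it can
	// resolve the current leader and re-pin it (SetEndpoints cannot).
	if err := c.AutoSync(context.Background(), 30*time.Second); err != nil {
		log.Println("AutoSync stopped:", err)
	}
}
```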
@@ -401,27 +414,51 @@ func (c *httpClusterClient) Sync(ctx context.Context) error {
return err
}
c.Lock()
defer c.Unlock()
var eps []string
for _, m := range ms {
eps = append(eps, m.ClientURLs...)
}
sort.Sort(sort.StringSlice(eps))
ceps := make([]string, len(c.endpoints))
for i, cep := range c.endpoints {
ceps[i] = cep.String()
}
sort.Sort(sort.StringSlice(ceps))
// fast path if no change happens
// this helps client to pin the endpoint when no cluster change
if reflect.DeepEqual(eps, ceps) {
return nil
neps, err := c.parseEndpoints(eps)
if err != nil {
return err
}
return c.SetEndpoints(eps)
npin := 0
switch c.selectionMode {
case EndpointSelectionRandom:
c.RLock()
eq := endpointsEqual(c.endpoints, neps)
c.RUnlock()
if eq {
return nil
}
// When the items in the endpoint list change, we choose a new pin
neps = shuffleEndpoints(c.rand, neps)
case EndpointSelectionPrioritizeLeader:
nle, err := c.getLeaderEndpoint(ctx, neps)
if err != nil {
return ErrNoLeaderEndpoint
}
for i, n := range neps {
if n.String() == nle {
npin = i
break
}
}
default:
return fmt.Errorf("invalid endpoint selection mode: %d", c.selectionMode)
}
c.Lock()
defer c.Unlock()
c.endpoints = neps
c.pinned = npin
return nil
}
func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error {
@@ -607,3 +644,27 @@ func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL {
}
return neps
}
func endpointsEqual(left, right []url.URL) bool {
if len(left) != len(right) {
return false
}
sLeft := make([]string, len(left))
sRight := make([]string, len(right))
for i, l := range left {
sLeft[i] = l.String()
}
for i, r := range right {
sRight[i] = r.String()
}
sort.Strings(sLeft)
sort.Strings(sRight)
for i := range sLeft {
if sLeft[i] != sRight[i] {
return false
}
}
return true
}

View File

@@ -272,6 +272,10 @@ type Response struct {
// Index holds the cluster-level index at the time the Response was generated.
// This index is not tied to the Node(s) contained in this Response.
Index uint64 `json:"-"`
// ClusterID holds the cluster-level ID reported by the server. This
// should be different for different etcd clusters.
ClusterID string `json:"-"`
}
type Node struct {
@@ -665,6 +669,7 @@ func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response
return nil, err
}
}
res.ClusterID = header.Get("X-Etcd-Cluster-ID")
return &res, nil
}
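Because ClusterID is populated from the X-Etcd-Cluster-ID response header, callers can compare it across responses to notice that the configured endpoints now answer from a different cluster. A hedged sketch; the helper name and the key queried are illustrative, and the client, context, and log imports are assumed:
```go
// detectClusterChange returns the cluster ID observed on a fresh Get and
// logs when it differs from the previously observed one.
func detectClusterChange(kapi client.KeysAPI, last string) (string, error) {
	resp, err := kapi.Get(context.Background(), "/", nil)
	if err != nil {
		return last, err
	}
	if last != "" && resp.ClusterID != last {
		log.Printf("etcd cluster changed: %s -> %s", last, resp.ClusterID)
	}
	return resp.ClusterID, nil
}
```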

View File

@@ -95,6 +95,11 @@ func (l *LogLevel) Set(s string) error {
return nil
}
// Returns an empty string, only here to fulfill the pflag.Value interface.
func (l *LogLevel) Type() string {
return ""
}
// ParseLevel translates some potential loglevel strings into their corresponding levels.
func ParseLevel(s string) (LogLevel, error) {
switch s {

View File

@@ -70,6 +70,7 @@ type DuoApi struct {
type apiOptions struct {
timeout time.Duration
insecure bool
proxy func(*http.Request) (*url.URL, error)
}
// Optional parameter for NewDuoApi, used to configure timeouts on API calls.
@@ -87,6 +88,14 @@ func SetInsecure() func(*apiOptions) {
}
}
// Optional parameter for NewDuoApi, used to configure an HTTP Connect proxy
// server for all outbound communications.
func SetProxy(proxy func(*http.Request) (*url.URL, error)) func(*apiOptions) {
return func(opts *apiOptions) {
opts.proxy = proxy
}
}
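The new option composes with the standard library's http.ProxyURL helper to pin a single proxy instead of reading it from the environment. A sketch; ikey, skey, host, and the proxy address are placeholders:
```go
proxyURL, err := url.Parse("http://proxy.internal:3128")
if err != nil {
	log.Fatal(err)
}
api := duoapi.NewDuoApi(ikey, skey, host, "example-app/1.0",
	duoapi.SetTimeout(10*time.Second),
	duoapi.SetProxy(http.ProxyURL(proxyURL)), // fixed proxy for all Duo calls
)
_ = api
```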
// Build and return a DuoApi struct.
// ikey is your Duo integration key
// skey is your Duo integration secret key
@@ -94,7 +103,7 @@ func SetInsecure() func(*apiOptions) {
// userAgent allows you to specify the user agent string used when making
// the web request to Duo.
// options are optional parameters. Use SetTimeout() to specify a timeout value
// for Rest API calls.
// for Rest API calls. Use SetProxy() to specify proxy settings for Duo API calls.
//
// Example: duoapi.NewDuoApi(ikey,skey,host,userAgent,duoapi.SetTimeout(10*time.Second))
func NewDuoApi(ikey string,
@@ -102,7 +111,7 @@ func NewDuoApi(ikey string,
host string,
userAgent string,
options ...func(*apiOptions)) *DuoApi {
opts := apiOptions{}
opts := apiOptions{proxy: http.ProxyFromEnvironment}
for _, o := range options {
o(&opts)
}
@@ -112,7 +121,7 @@ func NewDuoApi(ikey string,
certPool.AppendCertsFromPEM([]byte(duoPinnedCert))
tr := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Proxy: opts.proxy,
TLSClientConfig: &tls.Config{
RootCAs: certPool,
InsecureSkipVerify: opts.insecure,

View File

@@ -4,13 +4,13 @@
## Introduction
A wrapper around [candiedyaml](https://github.com/cloudfoundry-incubator/candiedyaml) designed to enable a better way of handling YAML when marshaling to and from structs.
A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs.
In short, this library first converts YAML to JSON using candiedyaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike candiedyaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/).
## Compatibility
This package uses [candiedyaml](https://github.com/cloudfoundry-incubator/candiedyaml) and therefore supports [everything candiedyaml supports](https://github.com/cloudfoundry-incubator/candiedyaml#candiedyaml).
This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility).
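A minimal sketch of the round trip described above; the Person struct and its values are illustrative:
```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type Person struct {
	Name string `json:"name"` // JSON tags are reused for YAML
	Age  int    `json:"age"`
}

func main() {
	out, err := yaml.Marshal(Person{Name: "John", Age: 30})
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // age: 30 / name: John

	var p Person
	if err := yaml.Unmarshal([]byte("name: Jane\nage: 25\n"), &p); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", p) // {Name:Jane Age:25}
}
```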
## Caveats

View File

@@ -7,7 +7,7 @@ import (
"reflect"
"strconv"
yaml "github.com/cloudfoundry-incubator/candiedyaml"
"gopkg.in/yaml.v2"
)
// Marshals the object into JSON then converts JSON to YAML and returns the

View File

@@ -31,6 +31,7 @@ Julien Lefevre <julien.lefevr at gmail.com>
Julien Schmidt <go-sql-driver at julienschmidt.com>
Kamil Dziedzic <kamil at klecza.pl>
Kevin Malachowski <kevin at chowski.com>
Lennart Rudolph <lrudolph at hmc.edu>
Leonardo YongUk Kim <dalinaum at gmail.com>
Luca Looz <luca.looz92 at gmail.com>
Lucas Liu <extrafliu at gmail.com>
@@ -42,8 +43,10 @@ Runrioter Wung <runrioter at gmail.com>
Soroush Pour <me at soroushjp.com>
Stan Putrya <root.vagner at gmail.com>
Stanley Gunawan <gunawan.stanley at gmail.com>
Xiangyu Hu <xiangyu.hu at outlook.com>
Xiaobing Jiang <s7v7nislands at gmail.com>
Xiuming Chen <cc at cxm.cc>
Zhenye Xie <xiezhenye at gmail.com>
# Organizations

View File

@@ -3,16 +3,30 @@
Changes:
- Go 1.1 is no longer supported
- Use decimals field from MySQL to format time types (#249)
- Use decimals fields in MySQL to format time types (#249)
- Buffer optimizations (#269)
- TLS ServerName defaults to the host (#283)
- Refactoring (#400, #410, #437)
- Adjusted documentation for second generation CloudSQL (#485)
New Features:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Support for returning table alias on Columns() (#289, #359, #382)
- Placeholder interpolation, can be activated with the DSN parameter `interpolateParams=true` (#309, #318, #490)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Exported ParseDSN function and the Config struct (#403, #419, #429)
- Read / Write timeouts (#401)
- Support for JSON field type (#414)
- Support for multi-statements and multi-results (#411, #431)
- DSN parameter to set the driver-side max_allowed_packet value manually (#489)
Bugfixes:
- Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249)
- Fixed handling of queries without columns and rows (#255)
- Fixed a panic when SetKeepAlive() failed (#298)
- Support receiving ERR packet while reading rows (#321)
- Handle ERR packets while reading rows (#321)
- Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349)
- Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356)
- Actually zero out bytes in handshake response (#378)
@@ -20,13 +34,10 @@ Bugfixes:
- Fixed tests with MySQL 5.7.9+ (#380)
- QueryUnescape TLS config names (#397)
- Fixed "broken pipe" error by writing to closed socket (#390)
New Features:
- Support for returning table alias on Columns() (#289, #359, #382)
- Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318)
- Support for uint64 parameters with high bit set (#332, #345)
- Cleartext authentication plugin support (#327)
- Fixed LOAD LOCAL DATA INFILE buffering (#424)
- Fixed parsing of floats into float64 when placeholders are used (#434)
- Fixed DSN tests with Go 1.7+ (#459)
- Handle ERR packets while waiting for EOF (#473)
## Version 1.2 (2014-06-03)

View File

@@ -135,6 +135,15 @@ Default: false
`allowCleartextPasswords=true` allows using the [cleartext client side plugin](http://dev.mysql.com/doc/en/cleartext-authentication-plugin.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network.
##### `allowNativePasswords`
```
Type: bool
Valid Values: true, false
Default: false
```
`allowNativePasswords=true` allows the usage of the MySQL native password method.
##### `allowOldPasswords`
```
@@ -221,6 +230,14 @@ Note that this sets the location for time.Time values but does not change MySQL'
Please keep in mind, that param values must be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`.
##### `maxAllowedPacket`
```
Type: decimal number
Default: 0
```
Max packet size allowed in bytes. Use `maxAllowedPacket=0` to automatically fetch the `max_allowed_packet` variable from the server.
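For example, to pin a 16 MiB limit client-side (credentials and address illustrative):
```
user:password@tcp(localhost:3306)/dbname?maxAllowedPacket=16777216
```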
##### `multiStatements`
```
@@ -233,7 +250,6 @@ Allow multiple statements in one query. While this allows batch queries, it also
When `multiStatements` is used, `?` parameters must only be used in the first statement.
##### `parseTime`
```
@@ -254,7 +270,6 @@ Default: 0
I/O read timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*.
##### `strict`
```
@@ -263,10 +278,11 @@ Valid Values: true, false
Default: false
```
`strict=true` enables the strict mode in which MySQL warnings are treated as errors.
`strict=true` enables a driver-side strict mode in which MySQL warnings are treated as errors. This mode should not be used in production as it may lead to data corruption in certain situations.
By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes. See the [examples](#examples) for an DSN example.
A server-side strict mode, which is safe for production use, can be set via the [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html) system variable.
By default MySQL also treats notes as warnings. Use [`sql_notes=false`](http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_sql_notes) to ignore notes.
##### `timeout`
@@ -277,7 +293,6 @@ Default: OS default
*Driver* side connection timeout. The value must be a decimal number with a unit suffix ( *"ms"*, *"s"*, *"m"*, *"h"* ), such as *"30s"*, *"0.5m"* or *"1m30s"*. To set a server side timeout, use the parameter [`wait_timeout`](http://dev.mysql.com/doc/refman/5.6/en/server-system-variables.html#sysvar_wait_timeout).
##### `tls`
```
@@ -288,7 +303,6 @@ Default: false
`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side). Use a custom value registered with [`mysql.RegisterTLSConfig`](http://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig).
##### `writeTimeout`
```
@@ -322,9 +336,9 @@ root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local
user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true
```
Use the [strict mode](#strict) but ignore notes:
Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html):
```
user:password@/dbname?strict=true&sql_notes=false
user:password@/dbname?sql_mode=TRADITIONAL
```
TCP via IPv6:
@@ -337,11 +351,16 @@ TCP on a remote host, e.g. Amazon RDS:
id:password@tcp(your-amazonaws-uri.com:3306)/dbname
```
Google Cloud SQL on App Engine:
Google Cloud SQL on App Engine (First Generation MySQL Server):
```
user@cloudsql(project-id:instance-name)/dbname
```
Google Cloud SQL on App Engine (Second Generation MySQL Server):
```
user@cloudsql(project-id:regionname:instance-name)/dbname
```
TCP using default port (3306) on localhost:
```
user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped

View File

@@ -22,7 +22,7 @@ type mysqlConn struct {
affectedRows uint64
insertId uint64
cfg *Config
maxPacketAllowed int
maxAllowedPacket int
maxWriteSize int
writeTimeout time.Duration
flags clientFlag
@@ -135,6 +135,11 @@ func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) {
}
func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) {
// The number of ? placeholders must equal len(args)
if strings.Count(query, "?") != len(args) {
return "", driver.ErrSkip
}
buf := mc.buf.takeCompleteBuffer()
if buf == nil {
// can not take the buffer. Something must be wrong with the connection
@@ -241,7 +246,7 @@ func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (strin
return "", driver.ErrSkip
}
if len(buf)+4 > mc.maxPacketAllowed {
if len(buf)+4 > mc.maxAllowedPacket {
return "", driver.ErrSkip
}
}

View File

@@ -50,7 +50,7 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
// New mysqlConn
mc := &mysqlConn{
maxPacketAllowed: maxPacketSize,
maxAllowedPacket: maxPacketSize,
maxWriteSize: maxPacketSize - 1,
}
mc.cfg, err = ParseDSN(dsn)
@@ -101,7 +101,7 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
}
// Handle response to auth packet, switch methods if possible
if err = handleAuthResult(mc, cipher); err != nil {
if err = handleAuthResult(mc); err != nil {
// Authentication failed and MySQL has already closed the connection
// (https://dev.mysql.com/doc/internals/en/authentication-fails.html).
// Do not send COM_QUIT, just cleanup and return the error.
@@ -109,15 +109,19 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
return nil, err
}
// Get max allowed packet size
maxap, err := mc.getSystemVar("max_allowed_packet")
if err != nil {
mc.Close()
return nil, err
if mc.cfg.MaxAllowedPacket > 0 {
mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket
} else {
// Get max allowed packet size
maxap, err := mc.getSystemVar("max_allowed_packet")
if err != nil {
mc.Close()
return nil, err
}
mc.maxAllowedPacket = stringToInt(maxap) - 1
}
mc.maxPacketAllowed = stringToInt(maxap) - 1
if mc.maxPacketAllowed < maxPacketSize {
mc.maxWriteSize = mc.maxPacketAllowed
if mc.maxAllowedPacket < maxPacketSize {
mc.maxWriteSize = mc.maxAllowedPacket
}
// Handle DSN Params
@@ -130,9 +134,9 @@ func (d MySQLDriver) Open(dsn string) (driver.Conn, error) {
return mc, nil
}
func handleAuthResult(mc *mysqlConn, cipher []byte) error {
func handleAuthResult(mc *mysqlConn) error {
// Read Result Packet
err := mc.readResultOK()
cipher, err := mc.readResultOK()
if err == nil {
return nil // auth successful
}
@@ -149,7 +153,7 @@ func handleAuthResult(mc *mysqlConn, cipher []byte) error {
if err = mc.writeOldAuthPacket(cipher); err != nil {
return err
}
err = mc.readResultOK()
_, err = mc.readResultOK()
} else if mc.cfg.AllowCleartextPasswords && err == ErrCleartextPassword {
// Retry with clear text password for
// http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html
@@ -157,7 +161,12 @@ func handleAuthResult(mc *mysqlConn, cipher []byte) error {
if err = mc.writeClearAuthPacket(); err != nil {
return err
}
err = mc.readResultOK()
_, err = mc.readResultOK()
} else if mc.cfg.AllowNativePasswords && err == ErrNativePassword {
if err = mc.writeNativeAuthPacket(cipher); err != nil {
return err
}
_, err = mc.readResultOK()
}
return err
}

View File

@@ -15,6 +15,7 @@ import (
"fmt"
"net"
"net/url"
"strconv"
"strings"
"time"
)
@@ -28,22 +29,24 @@ var (
// Config is a configuration parsed from a DSN string
type Config struct {
User string // Username
Passwd string // Password (requires User)
Net string // Network type
Addr string // Network address (requires Net)
DBName string // Database name
Params map[string]string // Connection parameters
Collation string // Connection collation
Loc *time.Location // Location for time.Time values
TLSConfig string // TLS configuration name
tls *tls.Config // TLS configuration
Timeout time.Duration // Dial timeout
ReadTimeout time.Duration // I/O read timeout
WriteTimeout time.Duration // I/O write timeout
User string // Username
Passwd string // Password (requires User)
Net string // Network type
Addr string // Network address (requires Net)
DBName string // Database name
Params map[string]string // Connection parameters
Collation string // Connection collation
Loc *time.Location // Location for time.Time values
MaxAllowedPacket int // Max packet size allowed
TLSConfig string // TLS configuration name
tls *tls.Config // TLS configuration
Timeout time.Duration // Dial timeout
ReadTimeout time.Duration // I/O read timeout
WriteTimeout time.Duration // I/O write timeout
AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE
AllowCleartextPasswords bool // Allows the cleartext client side plugin
AllowNativePasswords bool // Allows the native password authentication method
AllowOldPasswords bool // Allows the old insecure password method
ClientFoundRows bool // Return number of matching rows instead of rows changed
ColumnsWithAlias bool // Prepend table alias to column names
@@ -99,6 +102,15 @@ func (cfg *Config) FormatDSN() string {
}
}
if cfg.AllowNativePasswords {
if hasParam {
buf.WriteString("&allowNativePasswords=true")
} else {
hasParam = true
buf.WriteString("?allowNativePasswords=true")
}
}
if cfg.AllowOldPasswords {
if hasParam {
buf.WriteString("&allowOldPasswords=true")
@@ -222,6 +234,17 @@ func (cfg *Config) FormatDSN() string {
buf.WriteString(cfg.WriteTimeout.String())
}
if cfg.MaxAllowedPacket > 0 {
if hasParam {
buf.WriteString("&maxAllowedPacket=")
} else {
hasParam = true
buf.WriteString("?maxAllowedPacket=")
}
buf.WriteString(strconv.Itoa(cfg.MaxAllowedPacket))
}
// other params
if cfg.Params != nil {
for param, value := range cfg.Params {
@@ -368,6 +391,14 @@ func parseDSNParams(cfg *Config, params string) (err error) {
return errors.New("invalid bool value: " + value)
}
// Use native password authentication
case "allowNativePasswords":
var isBool bool
cfg.AllowNativePasswords, isBool = readBool(value)
if !isBool {
return errors.New("invalid bool value: " + value)
}
// Use old authentication mode (pre MySQL 4.1)
case "allowOldPasswords":
var isBool bool
@@ -496,7 +527,11 @@ func parseDSNParams(cfg *Config, params string) (err error) {
if err != nil {
return
}
case "maxAllowedPacket":
cfg.MaxAllowedPacket, err = strconv.Atoi(value)
if err != nil {
return
}
default:
// lazy init
if cfg.Params == nil {

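Putting the new MaxAllowedPacket field together with the exported ParseDSN and FormatDSN, a minimal sketch (DSN values illustrative; assumes database/sql, log, and this driver are imported):
```go
cfg, err := mysql.ParseDSN("user:password@tcp(localhost:3306)/dbname")
if err != nil {
	log.Fatal(err)
}
// A value > 0 pins the packet limit client-side and skips fetching
// max_allowed_packet from the server on connect.
cfg.MaxAllowedPacket = 16 << 20 // 16 MiB
db, err := sql.Open("mysql", cfg.FormatDSN())
if err != nil {
	log.Fatal(err)
}
defer db.Close()
```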
View File

@@ -22,8 +22,9 @@ var (
ErrInvalidConn = errors.New("invalid connection")
ErrMalformPkt = errors.New("malformed packet")
ErrNoTLS = errors.New("TLS requested but server does not support TLS")
ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN")
ErrNativePassword = errors.New("this user requires mysql native password authentication.")
ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords")
ErrUnknownPlugin = errors.New("this authentication plugin is not supported")
ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+")
ErrPktSync = errors.New("commands out of sync. You can't run this command now")

View File

@@ -173,7 +173,8 @@ func (mc *mysqlConn) handleInFileRequest(name string) (err error) {
// read OK packet
if err == nil {
return mc.readResultOK()
_, err = mc.readResultOK()
return err
}
mc.readPacket()

View File

@@ -80,7 +80,7 @@ func (mc *mysqlConn) readPacket() ([]byte, error) {
func (mc *mysqlConn) writePacket(data []byte) error {
pktLen := len(data) - 4
if pktLen > mc.maxPacketAllowed {
if pktLen > mc.maxAllowedPacket {
return ErrPktTooLarge
}
@@ -372,6 +372,26 @@ func (mc *mysqlConn) writeClearAuthPacket() error {
return mc.writePacket(data)
}
// Native password authentication method
// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse
func (mc *mysqlConn) writeNativeAuthPacket(cipher []byte) error {
scrambleBuff := scramblePassword(cipher, []byte(mc.cfg.Passwd))
// Calculate the packet length and add a trailing 0
pktLen := len(scrambleBuff)
data := mc.buf.takeSmallBuffer(4 + pktLen)
if data == nil {
// can not take the buffer. Something must be wrong with the connection
errLog.Print(ErrBusyBuffer)
return driver.ErrBadConn
}
// Add the scramble
copy(data[4:], scrambleBuff)
return mc.writePacket(data)
}
/******************************************************************************
* Command Packets *
******************************************************************************/
@@ -445,36 +465,42 @@ func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error {
******************************************************************************/
// Returns error if Packet is not an 'Result OK'-Packet
func (mc *mysqlConn) readResultOK() error {
func (mc *mysqlConn) readResultOK() ([]byte, error) {
data, err := mc.readPacket()
if err == nil {
// packet indicator
switch data[0] {
case iOK:
return mc.handleOkPacket(data)
return nil, mc.handleOkPacket(data)
case iEOF:
if len(data) > 1 {
plugin := string(data[1:bytes.IndexByte(data, 0x00)])
pluginEndIndex := bytes.IndexByte(data, 0x00)
plugin := string(data[1:pluginEndIndex])
cipher := data[pluginEndIndex+1 : len(data)-1]
if plugin == "mysql_old_password" {
// using old_passwords
return ErrOldPassword
return cipher, ErrOldPassword
} else if plugin == "mysql_clear_password" {
// using clear text password
return ErrCleartextPassword
return cipher, ErrCleartextPassword
} else if plugin == "mysql_native_password" {
// using mysql default authentication method
return cipher, ErrNativePassword
} else {
return ErrUnknownPlugin
return cipher, ErrUnknownPlugin
}
} else {
return ErrOldPassword
return nil, ErrOldPassword
}
default: // Error otherwise
return mc.handleErrorPacket(data)
return nil, mc.handleErrorPacket(data)
}
}
return err
return nil, err
}
// Result Set Header Packet
@@ -786,7 +812,7 @@ func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) {
// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html
func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error {
maxLen := stmt.mc.maxPacketAllowed - 1
maxLen := stmt.mc.maxAllowedPacket - 1
pktLen := maxLen
// After the header (bytes 0-3) follows before the data:
@@ -977,7 +1003,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
paramTypes[i+i] = fieldTypeString
paramTypes[i+i+1] = 0x00
if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 {
if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)
@@ -999,7 +1025,7 @@ func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error {
paramTypes[i+i] = fieldTypeString
paramTypes[i+i+1] = 0x00
if len(v) < mc.maxPacketAllowed-pos-len(paramValues)-(len(args)-(i+1))*64 {
if len(v) < mc.maxAllowedPacket-pos-len(paramValues)-(len(args)-(i+1))*64 {
paramValues = appendLengthEncodedInteger(paramValues,
uint64(len(v)),
)

View File

@@ -79,3 +79,6 @@ Bartosz Burclaf <burclaf@gmail.com>
Marcus King <marcusking01@gmail.com>
Andrew de Andrade <andrew@deandrade.com.br>
Robert Nix <robert@nicerobot.org>
Nathan Youngman <git@nathany.com>
Charles Law <charles.law@gmail.com>; <claw@conduce.com>
Nathan Davies <nathanjamesdavies@gmail.com>

View File

@@ -2,37 +2,28 @@ gocql
=====
[![Join the chat at https://gitter.im/gocql/gocql](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gocql/gocql?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://travis-ci.org/gocql/gocql.png?branch=master)](https://travis-ci.org/gocql/gocql)
[![GoDoc](http://godoc.org/github.com/gocql/gocql?status.png)](http://godoc.org/github.com/gocql/gocql)
[![Build Status](https://travis-ci.org/gocql/gocql.svg?branch=master)](https://travis-ci.org/gocql/gocql)
[![GoDoc](https://godoc.org/github.com/gocql/gocql?status.svg)](https://godoc.org/github.com/gocql/gocql)
Package gocql implements a fast and robust Cassandra client for the
Go programming language.
Project Website: http://gocql.github.io/<br>
API documentation: http://godoc.org/github.com/gocql/gocql<br>
Project Website: https://gocql.github.io/<br>
API documentation: https://godoc.org/github.com/gocql/gocql<br>
Discussions: https://groups.google.com/forum/#!forum/gocql
Production Stability
--------------------
The method by which the driver maintains and discovers hosts in the Cassandra cluster changed when support was added for event-driven discovery using server-side events. This meant many things inside the driver were touched and changed; as such, if you would like to go back to the historical node discovery, the tag `pre-node-events` is a tree which uses the old polling-based discovery.
If you run into bugs related to node discovery using events please open a ticket.
Supported Versions
------------------
The following matrix shows the versions of Go and Cassandra that are tested with the integration test suite as part of the CI build:
Go/Cassandra | 2.0.x | 2.1.x | 2.2.x
Go/Cassandra | 2.1.x | 2.2.x | 3.0.x
-------------| -------| ------| ---------
1.6 | yes | yes | yes
1.7 | yes | yes | yes
Gocql has been tested in production against many different versions of Cassandra. Due to limits in our CI setup we only test against the latest 3 major releases, which coincide with the official support from the Apache project.
NOTE: Cassandra 3.0 requires Java 8, and currently (06/02/2016) we cannot install Java 8 in Travis to run the integration tests. To run against Cassandra >= 3.0, enable protocol 4 and it should work fine; if not, please report bugs.
Sunsetting Model
----------------
@@ -72,7 +63,7 @@ Features
* Support for tuple types
* Support for client side timestamps by default
* Support for UDTs via a custom marshaller or struct tags
* Support for Cassandra 2.2+ [binary protocol version 4](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec)
* Support for Cassandra 3.0+ [binary protocol version 4](https://github.com/apache/cassandra/blob/trunk/doc/native_protocol_v4.spec)
* An API to access the schema metadata of a given keyspace
Performance

View File

@@ -23,13 +23,27 @@ func (p PoolConfig) buildPool(session *Session) *policyConnPool {
}
// ClusterConfig is a struct to configure the default cluster implementation
// of gocoql. It has a variety of attributes that can be used to modify the
// of gocql. It has a variety of attributes that can be used to modify the
// behavior to fit the most common use cases. Applications that require a
// different setup must implement their own cluster.
type ClusterConfig struct {
Hosts []string // addresses for the initial connections
CQLVersion string // CQL version (default: 3.0.0)
ProtoVersion int // version of the native protocol (default: 2)
// addresses for the initial connections. It is recommended to use the value set in
// the Cassandra config for broadcast_address or listen_address, an IP address (not
// a domain name). This is because events from Cassandra will use the configured IP
// address, which is used to index connected hosts. If the domain name specified
// resolves to more than one IP address then the driver may connect multiple times to
// the same host, and will not mark the node as being down or up from events.
Hosts []string
CQLVersion string // CQL version (default: 3.0.0)
// ProtoVersion sets the version of the native protocol to use. This will
// enable features in the driver for specific protocol versions; generally this
// should be set to a known version (2, 3, or 4) for the cluster being connected to.
//
// If it is 0 or unset (the default) then the driver will attempt to discover the
// highest supported protocol for the cluster. In clusters with nodes of different
// versions the protocol selected is not defined (i.e. it can be any version supported in the cluster).
ProtoVersion int
Timeout time.Duration // connection timeout (default: 600ms)
Port int // port (default: 9042)
Keyspace string // initial keyspace (optional)
@@ -100,11 +114,18 @@ type ClusterConfig struct {
}
// NewCluster generates a new config for the default cluster implementation.
//
// The supplied hosts are used to initially connect to the cluster, then the rest of
// the ring will be automatically discovered. It is recommended to use the value set in
// the Cassandra config for broadcast_address or listen_address, an IP address (not
// a domain name). This is because events from Cassandra will use the configured IP
// address, which is used to index connected hosts. If the domain name specified
// resolves to more than one IP address then the driver may connect multiple times to
// the same host, and will not mark the node as being down or up from events.
func NewCluster(hosts ...string) *ClusterConfig {
cfg := &ClusterConfig{
Hosts: hosts,
CQLVersion: "3.0.0",
ProtoVersion: 2,
Timeout: 600 * time.Millisecond,
Port: 9042,
NumConns: 2,

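A short usage sketch of the config described above; host IPs and keyspace are illustrative:
```go
package main

import (
	"log"

	"github.com/gocql/gocql"
)

func main() {
	// Per the doc comment, prefer the nodes' broadcast_address/listen_address
	// IPs over DNS names here.
	cluster := gocql.NewCluster("10.0.0.1", "10.0.0.2")
	cluster.Keyspace = "example"
	// Leaving ProtoVersion at 0 lets the driver discover the highest
	// supported protocol; pin it (2, 3, or 4) for a known cluster.
	cluster.ProtoVersion = 4

	session, err := cluster.CreateSession()
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()
}
```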
View File

@@ -152,8 +152,15 @@ type Conn struct {
}
// Connect establishes a connection to a Cassandra node.
func Connect(host *HostInfo, addr string, cfg *ConnConfig,
errorHandler ConnErrorHandler, session *Session) (*Conn, error) {
func Connect(host *HostInfo, cfg *ConnConfig, errorHandler ConnErrorHandler, session *Session) (*Conn, error) {
// TODO(zariel): remove these
if host == nil {
panic("host is nil")
} else if len(host.Peer()) == 0 {
panic("host missing peer ip address")
} else if host.Port() == 0 {
panic("host missing port")
}
var (
err error
@@ -164,6 +171,9 @@ func Connect(host *HostInfo, addr string, cfg *ConnConfig,
Timeout: cfg.Timeout,
}
// TODO(zariel): handle ipv6 zone
addr := (&net.TCPAddr{IP: host.Peer(), Port: host.Port()}).String()
if cfg.tlsConfig != nil {
// the TLS config is safe to be reused by connections but it must not
// be modified after being used.
@@ -430,6 +440,17 @@ func (c *Conn) discardFrame(head frameHeader) error {
return nil
}
type protocolError struct {
frame frame
}
func (p *protocolError) Error() string {
if err, ok := p.frame.(error); ok {
return err.Error()
}
return fmt.Sprintf("gocql: received unexpected frame on stream %d: %v", p.frame.Header().stream, p.frame)
}
func (c *Conn) recv() error {
// not safe for concurrent reads
@@ -469,11 +490,8 @@ func (c *Conn) recv() error {
return err
}
switch v := frame.(type) {
case error:
return fmt.Errorf("gocql: error on stream %d: %v", head.stream, v)
default:
return fmt.Errorf("gocql: received frame on stream %d: %v", head.stream, frame)
return &protocolError{
frame: frame,
}
}

View File

@@ -130,9 +130,10 @@ func (p *policyConnPool) SetHosts(hosts []*HostInfo) {
// don't create a connection pool for a down host
continue
}
if _, exists := p.hostConnPools[host.Peer()]; exists {
ip := host.Peer().String()
if _, exists := p.hostConnPools[ip]; exists {
// still have this host, so don't remove it
delete(toRemove, host.Peer())
delete(toRemove, ip)
continue
}
@@ -155,7 +156,7 @@ func (p *policyConnPool) SetHosts(hosts []*HostInfo) {
createCount--
if pool.Size() > 0 {
// add pool only if there are connections available
p.hostConnPools[pool.host.Peer()] = pool
p.hostConnPools[string(pool.host.Peer())] = pool
}
}
@@ -177,9 +178,10 @@ func (p *policyConnPool) Size() int {
return count
}
func (p *policyConnPool) getPool(addr string) (pool *hostConnPool, ok bool) {
func (p *policyConnPool) getPool(host *HostInfo) (pool *hostConnPool, ok bool) {
ip := host.Peer().String()
p.mu.RLock()
pool, ok = p.hostConnPools[addr]
pool, ok = p.hostConnPools[ip]
p.mu.RUnlock()
return
}
@@ -196,8 +198,9 @@ func (p *policyConnPool) Close() {
}
func (p *policyConnPool) addHost(host *HostInfo) {
ip := host.Peer().String()
p.mu.Lock()
pool, ok := p.hostConnPools[host.Peer()]
pool, ok := p.hostConnPools[ip]
if !ok {
pool = newHostConnPool(
p.session,
@@ -207,22 +210,23 @@ func (p *policyConnPool) addHost(host *HostInfo) {
p.keyspace,
)
p.hostConnPools[host.Peer()] = pool
p.hostConnPools[ip] = pool
}
p.mu.Unlock()
pool.fill()
}
func (p *policyConnPool) removeHost(addr string) {
func (p *policyConnPool) removeHost(ip net.IP) {
k := ip.String()
p.mu.Lock()
pool, ok := p.hostConnPools[addr]
pool, ok := p.hostConnPools[k]
if !ok {
p.mu.Unlock()
return
}
delete(p.hostConnPools, addr)
delete(p.hostConnPools, k)
p.mu.Unlock()
go pool.Close()
@@ -234,10 +238,10 @@ func (p *policyConnPool) hostUp(host *HostInfo) {
p.addHost(host)
}
func (p *policyConnPool) hostDown(addr string) {
func (p *policyConnPool) hostDown(ip net.IP) {
// TODO(zariel): mark host as down so we can try to connect to it later, for
// now just treat it as removed.
p.removeHost(addr)
p.removeHost(ip)
}
// hostConnPool is a connection pool for a single host.
@@ -272,7 +276,7 @@ func newHostConnPool(session *Session, host *HostInfo, port, size int,
session: session,
host: host,
port: port,
addr: JoinHostPort(host.Peer(), port),
addr: (&net.TCPAddr{IP: host.Peer(), Port: host.Port()}).String(),
size: size,
keyspace: keyspace,
conns: make([]*Conn, 0, size),
@@ -396,7 +400,7 @@ func (pool *hostConnPool) fill() {
// this is called with the connection pool mutex held; this call will
// then recursively try to lock it again. FIXME
go pool.session.handleNodeDown(net.ParseIP(pool.host.Peer()), pool.port)
go pool.session.handleNodeDown(pool.host.Peer(), pool.port)
return
}
@@ -477,7 +481,7 @@ func (pool *hostConnPool) connect() (err error) {
// try to connect
var conn *Conn
for i := 0; i < maxAttempts; i++ {
conn, err = pool.session.connect(pool.addr, pool, pool.host)
conn, err = pool.session.connect(pool.host, pool)
if err == nil {
break
}

View File

@@ -4,13 +4,15 @@ import (
crand "crypto/rand"
"errors"
"fmt"
"golang.org/x/net/context"
"log"
"math/rand"
"net"
"regexp"
"strconv"
"sync/atomic"
"time"
"golang.org/x/net/context"
)
var (
@@ -89,6 +91,8 @@ func (c *controlConn) heartBeat() {
}
}
var hostLookupPreferV4 = false
func hostInfo(addr string, defaultPort int) (*HostInfo, error) {
var port int
host, portStr, err := net.SplitHostPort(addr)
@@ -102,55 +106,120 @@ func hostInfo(addr string, defaultPort int) (*HostInfo, error) {
}
}
return &HostInfo{peer: host, port: port}, nil
}
ip := net.ParseIP(host)
if ip == nil {
ips, err := net.LookupIP(host)
if err != nil {
return nil, err
} else if len(ips) == 0 {
return nil, fmt.Errorf("No IP's returned from DNS lookup for %q", addr)
}
func (c *controlConn) shuffleDial(endpoints []string) (conn *Conn, err error) {
perm := randr.Perm(len(endpoints))
shuffled := make([]string, len(endpoints))
if hostLookupPreferV4 {
for _, v := range ips {
if v4 := v.To4(); v4 != nil {
ip = v4
break
}
}
if ip == nil {
ip = ips[0]
}
} else {
// TODO(zariel): should we check that we can connect to any of the ips?
ip = ips[0]
}
for i, endpoint := range endpoints {
shuffled[perm[i]] = endpoint
}
return &HostInfo{peer: ip, port: port}, nil
}
func shuffleHosts(hosts []*HostInfo) []*HostInfo {
perm := randr.Perm(len(hosts))
shuffled := make([]*HostInfo, len(hosts))
for i, host := range hosts {
shuffled[perm[i]] = host
}
return shuffled
}
func (c *controlConn) shuffleDial(endpoints []*HostInfo) (*Conn, error) {
// shuffle endpoints so not all drivers will connect to the same initial
// node.
for _, addr := range shuffled {
if addr == "" {
return nil, fmt.Errorf("invalid address: %q", addr)
}
shuffled := shuffleHosts(endpoints)
port := c.session.cfg.Port
addr = JoinHostPort(addr, port)
var host *HostInfo
host, err = hostInfo(addr, port)
if err != nil {
return nil, fmt.Errorf("invalid address: %q: %v", addr, err)
}
hostInfo, _ := c.session.ring.addHostIfMissing(host)
conn, err = c.session.connect(addr, c, hostInfo)
var err error
for _, host := range shuffled {
var conn *Conn
conn, err = c.session.connect(host, c)
if err == nil {
return conn, err
return conn, nil
}
log.Printf("gocql: unable to dial control conn %v: %v\n", addr, err)
log.Printf("gocql: unable to dial control conn %v: %v\n", host.Peer(), err)
}
if err != nil {
return nil, err
}
return conn, nil
return nil, err
}
func (c *controlConn) connect(endpoints []string) error {
if len(endpoints) == 0 {
// this is going to be version dependent and a nightmare to maintain :(
var protocolSupportRe = regexp.MustCompile(`the lowest supported version is \d+ and the greatest is (\d+)$`)
func parseProtocolFromError(err error) int {
// I really wish this had the actual info in the error frame...
matches := protocolSupportRe.FindAllStringSubmatch(err.Error(), -1)
if len(matches) != 1 || len(matches[0]) != 2 {
if verr, ok := err.(*protocolError); ok {
return int(verr.frame.Header().version.version())
}
return 0
}
max, err := strconv.Atoi(matches[0][1])
if err != nil {
return 0
}
return max
}
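An in-package illustration of the two paths above; the error text mimics Cassandra's wording and is not copied from a spec:
```go
// The regexp path parses the maximum version out of the server's message.
err := errors.New("Invalid or unsupported protocol version (5); " +
	"the lowest supported version is 3 and the greatest is 4")
fmt.Println(parseProtocolFromError(err)) // 4
// A *protocolError instead reports the version byte of the response frame.
```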
func (c *controlConn) discoverProtocol(hosts []*HostInfo) (int, error) {
hosts = shuffleHosts(hosts)
connCfg := *c.session.connCfg
connCfg.ProtoVersion = 4 // TODO: define maxProtocol
handler := connErrorHandlerFn(func(c *Conn, err error, closed bool) {
// we should never get here, but if we do it means we connected to a
// host successfully which means our attempted protocol version worked
})
var err error
for _, host := range hosts {
var conn *Conn
conn, err = Connect(host, &connCfg, handler, c.session)
if err == nil {
conn.Close()
return connCfg.ProtoVersion, nil
}
if proto := parseProtocolFromError(err); proto > 0 {
return proto, nil
}
}
return 0, err
}
func (c *controlConn) connect(hosts []*HostInfo) error {
if len(hosts) == 0 {
return errors.New("control: no endpoints specified")
}
conn, err := c.shuffleDial(endpoints)
conn, err := c.shuffleDial(hosts)
if err != nil {
return fmt.Errorf("control: unable to connect to initial hosts: %v", err)
}
@@ -229,22 +298,21 @@ func (c *controlConn) reconnect(refreshring bool) {
// TODO: simplify this function, use session.ring to get hosts instead of the
// connection pool
addr := c.addr()
var host *HostInfo
oldConn := c.conn.Load().(*Conn)
if oldConn != nil {
host = oldConn.host
oldConn.Close()
}
var newConn *Conn
if addr != "" {
if host != nil {
// try to connect to the old host
conn, err := c.session.connect(addr, c, oldConn.host)
conn, err := c.session.connect(host, c)
if err != nil {
// host is dead
// TODO: this is replicated in a few places
ip, portStr, _ := net.SplitHostPort(addr)
port, _ := strconv.Atoi(portStr)
c.session.handleNodeDown(net.ParseIP(ip), port)
c.session.handleNodeDown(host.Peer(), host.Port())
} else {
newConn = conn
}
@@ -260,7 +328,7 @@ func (c *controlConn) reconnect(refreshring bool) {
}
var err error
newConn, err = c.session.connect(host.Peer(), c, host)
newConn, err = c.session.connect(host, c)
if err != nil {
// TODO: add log handler for things like this
return
@@ -350,29 +418,28 @@ func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter
return
}
func (c *controlConn) fetchHostInfo(addr net.IP, port int) (*HostInfo, error) {
func (c *controlConn) fetchHostInfo(ip net.IP, port int) (*HostInfo, error) {
// TODO(zariel): we should probably move this into host_source or at least
// share code with it.
hostname, _, err := net.SplitHostPort(c.addr())
if err != nil {
return nil, fmt.Errorf("unable to fetch host info, invalid conn addr: %q: %v", c.addr(), err)
localHost := c.host()
if localHost == nil {
return nil, errors.New("unable to fetch host info, invalid conn host")
}
isLocal := hostname == addr.String()
isLocal := localHost.Peer().Equal(ip)
var fn func(*HostInfo) error
// TODO(zariel): fetch preferred_ip address (is it >3.x only?)
if isLocal {
fn = func(host *HostInfo) error {
// TODO(zariel): should we fetch rpc_address from here?
iter := c.query("SELECT data_center, rack, host_id, tokens, release_version FROM system.local WHERE key='local'")
iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version)
return iter.Close()
}
} else {
fn = func(host *HostInfo) error {
// TODO(zariel): should we fetch rpc_address from here?
iter := c.query("SELECT data_center, rack, host_id, tokens, release_version FROM system.peers WHERE peer=?", addr)
iter := c.query("SELECT data_center, rack, host_id, tokens, release_version FROM system.peers WHERE peer=?", ip)
iter.Scan(&host.dataCenter, &host.rack, &host.hostId, &host.tokens, &host.version)
return iter.Close()
}
@@ -380,12 +447,12 @@ func (c *controlConn) fetchHostInfo(addr net.IP, port int) (*HostInfo, error) {
host := &HostInfo{
port: port,
peer: ip,
}
if err := fn(host); err != nil {
return nil, err
}
host.peer = addr.String()
return host, nil
}
@@ -396,12 +463,12 @@ func (c *controlConn) awaitSchemaAgreement() error {
}).err
}
func (c *controlConn) addr() string {
func (c *controlConn) host() *HostInfo {
conn := c.conn.Load().(*Conn)
if conn == nil {
return ""
return nil
}
return conn.addr
return conn.host
}
func (c *controlConn) close() {

View File

@@ -7,7 +7,7 @@ import (
"time"
)
type eventDeouncer struct {
type eventDebouncer struct {
name string
timer *time.Timer
mu sync.Mutex
@@ -17,8 +17,8 @@ type eventDeouncer struct {
quit chan struct{}
}
func newEventDeouncer(name string, eventHandler func([]frame)) *eventDeouncer {
e := &eventDeouncer{
func newEventDebouncer(name string, eventHandler func([]frame)) *eventDebouncer {
e := &eventDebouncer{
name: name,
quit: make(chan struct{}),
timer: time.NewTimer(eventDebounceTime),
@@ -30,12 +30,12 @@ func newEventDeouncer(name string, eventHandler func([]frame)) *eventDeouncer {
return e
}
func (e *eventDeouncer) stop() {
func (e *eventDebouncer) stop() {
e.quit <- struct{}{} // sync with flusher
close(e.quit)
}
func (e *eventDeouncer) flusher() {
func (e *eventDebouncer) flusher() {
for {
select {
case <-e.timer.C:
@@ -54,7 +54,7 @@ const (
)
// flush must be called with mu locked
func (e *eventDeouncer) flush() {
func (e *eventDebouncer) flush() {
if len(e.events) == 0 {
return
}
@@ -66,7 +66,7 @@ func (e *eventDeouncer) flush() {
e.events = make([]frame, 0, eventBufferSize)
}
func (e *eventDeouncer) debounce(frame frame) {
func (e *eventDebouncer) debounce(frame frame) {
e.mu.Lock()
e.timer.Reset(eventDebounceTime)
@@ -171,25 +171,21 @@ func (s *Session) handleNodeEvent(frames []frame) {
}
}
func (s *Session) handleNewNode(host net.IP, port int, waitForBinary bool) {
// TODO(zariel): need to be able to filter discovered nodes
func (s *Session) handleNewNode(ip net.IP, port int, waitForBinary bool) {
var hostInfo *HostInfo
if s.control != nil && !s.cfg.IgnorePeerAddr {
var err error
hostInfo, err = s.control.fetchHostInfo(host, port)
hostInfo, err = s.control.fetchHostInfo(ip, port)
if err != nil {
log.Printf("gocql: events: unable to fetch host info for %v: %v\n", host, err)
log.Printf("gocql: events: unable to fetch host info for (%s:%d): %v\n", ip, port, err)
return
}
} else {
hostInfo = &HostInfo{peer: host.String(), port: port, state: NodeUp}
hostInfo = &HostInfo{peer: ip, port: port}
}
addr := host.String()
if s.cfg.IgnorePeerAddr && hostInfo.Peer() != addr {
hostInfo.setPeer(addr)
if s.cfg.IgnorePeerAddr && hostInfo.Peer().Equal(ip) {
hostInfo.setPeer(ip)
}
if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(hostInfo) {
@@ -217,11 +213,9 @@ func (s *Session) handleNewNode(host net.IP, port int, waitForBinary bool) {
func (s *Session) handleRemovedNode(ip net.IP, port int) {
// we remove all nodes but only add ones which pass the filter
addr := ip.String()
host := s.ring.getHost(addr)
host := s.ring.getHost(ip)
if host == nil {
host = &HostInfo{peer: addr}
host = &HostInfo{peer: ip, port: port}
}
if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
@@ -229,9 +223,9 @@ func (s *Session) handleRemovedNode(ip net.IP, port int) {
}
host.setState(NodeDown)
s.policy.RemoveHost(addr)
s.pool.removeHost(addr)
s.ring.removeHost(addr)
s.policy.RemoveHost(host)
s.pool.removeHost(ip)
s.ring.removeHost(ip)
if !s.cfg.IgnorePeerAddr {
s.hostSource.refreshRing()
@@ -242,11 +236,12 @@ func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) {
if gocqlDebug {
log.Printf("gocql: Session.handleNodeUp: %s:%d\n", ip.String(), port)
}
addr := ip.String()
host := s.ring.getHost(addr)
host := s.ring.getHost(ip)
if host != nil {
if s.cfg.IgnorePeerAddr && host.Peer() != addr {
host.setPeer(addr)
if s.cfg.IgnorePeerAddr && host.Peer().Equal(ip) {
// TODO: how can this ever be true?
host.setPeer(ip)
}
if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
@@ -257,7 +252,6 @@ func (s *Session) handleNodeUp(ip net.IP, port int, waitForBinary bool) {
time.Sleep(t)
}
host.setPort(port)
s.pool.hostUp(host)
s.policy.HostUp(host)
host.setState(NodeUp)
@@ -271,10 +265,10 @@ func (s *Session) handleNodeDown(ip net.IP, port int) {
if gocqlDebug {
log.Printf("gocql: Session.handleNodeDown: %s:%d\n", ip.String(), port)
}
addr := ip.String()
host := s.ring.getHost(addr)
host := s.ring.getHost(ip)
if host == nil {
host = &HostInfo{peer: addr}
host = &HostInfo{peer: ip, port: port}
}
if s.cfg.HostFilter != nil && !s.cfg.HostFilter.Accept(host) {
@@ -282,6 +276,6 @@ func (s *Session) handleNodeDown(ip net.IP, port int) {
}
host.setState(NodeDown)
s.policy.HostDown(addr)
s.pool.hostDown(addr)
s.policy.HostDown(host)
s.pool.hostDown(ip)
}

View File

@@ -1,5 +1,7 @@
package gocql
import "fmt"
// HostFilter interface is used when a host is discovered via server sent events.
type HostFilter interface {
// Called when a new host is discovered, returning true will cause the host
@@ -38,12 +40,18 @@ func DataCentreHostFilter(dataCentre string) HostFilter {
// WhiteListHostFilter filters incoming hosts by checking that their address is
// in the initial hosts whitelist.
func WhiteListHostFilter(hosts ...string) HostFilter {
m := make(map[string]bool, len(hosts))
for _, host := range hosts {
m[host] = true
hostInfos, err := addrsToHosts(hosts, 9042)
if err != nil {
// don't want to panic here, but rather not break the API
panic(fmt.Errorf("unable to lookup host info from address: %v", err))
}
m := make(map[string]bool, len(hostInfos))
for _, host := range hostInfos {
m[string(host.peer)] = true
}
return HostFilterFunc(func(host *HostInfo) bool {
return m[host.Peer()]
return m[string(host.Peer())]
})
}
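A usage sketch with illustrative addresses; note the filter now resolves the listed addresses to IPs via addrsToHosts, so entries may be hostnames or IPs:
```go
cluster := gocql.NewCluster("10.0.0.1", "10.0.0.2")
// Only hosts whose resolved IPs appear in the whitelist are used, even if
// server events advertise more peers.
cluster.HostFilter = gocql.WhiteListHostFilter("10.0.0.1", "10.0.0.2")
```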

View File

@@ -365,7 +365,7 @@ func readHeader(r io.Reader, p []byte) (head frameHeader, err error) {
version := p[0] & protoVersionMask
if version < protoVersion1 || version > protoVersion4 {
return frameHeader{}, fmt.Errorf("gocql: unsupported response version: %d", version)
return frameHeader{}, fmt.Errorf("gocql: unsupported protocol response version: %d", version)
}
headSize := 9

View File

@@ -100,7 +100,7 @@ type HostInfo struct {
// TODO(zariel): reduce locking maybe, not all values will change, but to ensure
// that we are thread safe use a mutex to access all fields.
mu sync.RWMutex
peer string
peer net.IP
port int
dataCenter string
rack string
@@ -116,16 +116,16 @@ func (h *HostInfo) Equal(host *HostInfo) bool {
host.mu.RLock()
defer host.mu.RUnlock()
return h.peer == host.peer && h.hostId == host.hostId
return h.peer.Equal(host.peer)
}
func (h *HostInfo) Peer() string {
func (h *HostInfo) Peer() net.IP {
h.mu.RLock()
defer h.mu.RUnlock()
return h.peer
}
func (h *HostInfo) setPeer(peer string) *HostInfo {
func (h *HostInfo) setPeer(peer net.IP) *HostInfo {
h.mu.Lock()
defer h.mu.Unlock()
h.peer = peer
@@ -314,7 +314,11 @@ func (r *ringDescriber) GetHosts() (hosts []*HostInfo, partitioner string, err e
return nil, "", err
}
} else {
iter := r.session.control.query(legacyLocalQuery)
iter := r.session.control.withConn(func(c *Conn) *Iter {
localHost = c.host
return c.query(legacyLocalQuery)
})
if iter == nil {
return r.prevHosts, r.prevPartitioner, nil
}
@@ -324,15 +328,6 @@ func (r *ringDescriber) GetHosts() (hosts []*HostInfo, partitioner string, err e
if err = iter.Close(); err != nil {
return nil, "", err
}
addr, _, err := net.SplitHostPort(r.session.control.addr())
if err != nil {
// this should not happen, ever, as this is the address that was dialed by conn, here
// a panic makes sense, please report a bug if it occurs.
panic(err)
}
localHost.peer = addr
}
localHost.port = r.session.cfg.Port

View File

@@ -1,6 +1,6 @@
#!/bin/bash
set -e
set -eux
function run_tests() {
local clusterSize=3
@@ -47,7 +47,10 @@ function run_tests() {
proto=2
elif [[ $version == 2.1.* ]]; then
proto=3
elif [[ $version == 2.2.* ]]; then
elif [[ $version == 2.2.* || $version == 3.0.* ]]; then
proto=4
ccm updateconf 'enable_user_defined_functions: true'
elif [[ $version == 3.*.* ]]; then
proto=4
ccm updateconf 'enable_user_defined_functions: true'
fi
@@ -55,11 +58,11 @@ function run_tests() {
sleep 1s
ccm list
ccm start
ccm start --wait-for-binary-proto
ccm status
ccm node1 nodetool status
local args="-v -gocql.timeout=60s -runssl -proto=$proto -rf=3 -clusterSize=$clusterSize -autowait=2000ms -compressor=snappy -gocql.cversion=$version -cluster=$(ccm liveset) ./..."
local args="-gocql.timeout=60s -runssl -proto=$proto -rf=3 -clusterSize=$clusterSize -autowait=2000ms -compressor=snappy -gocql.cversion=$version -cluster=$(ccm liveset) ./..."
go test -v -tags unit

View File

@@ -1094,6 +1094,7 @@ func unmarshalTimestamp(info TypeInfo, data []byte, value interface{}) error {
return nil
case *time.Time:
if len(data) == 0 {
*v = time.Time{}
return nil
}
x := decBigInt(data)

View File

@@ -45,7 +45,7 @@ type ColumnMetadata struct {
Table string
Name string
ComponentIndex int
Kind string
Kind ColumnKind
Validator string
Type TypeInfo
ClusteringOrder string
@@ -67,14 +67,65 @@ type ColumnIndexMetadata struct {
Options map[string]interface{}
}
// Column kind values
type ColumnKind int
const (
PARTITION_KEY = "partition_key"
CLUSTERING_KEY = "clustering_key"
REGULAR = "regular"
COMPACT_VALUE = "compact_value"
ColumnUnkownKind ColumnKind = iota
ColumnPartitionKey
ColumnClusteringKey
ColumnRegular
ColumnCompact
ColumnStatic
)
func (c ColumnKind) String() string {
switch c {
case ColumnPartitionKey:
return "partition_key"
case ColumnClusteringKey:
return "clustering_key"
case ColumnRegular:
return "regular"
case ColumnCompact:
return "compact"
case ColumnStatic:
return "static"
default:
return fmt.Sprintf("unkown_column_%d", c)
}
}
func (c *ColumnKind) UnmarshalCQL(typ TypeInfo, p []byte) error {
if typ.Type() != TypeVarchar {
return unmarshalErrorf("unable to marshall %s into ColumnKind, expected Varchar", typ)
}
kind, err := columnKindFromSchema(string(p))
if err != nil {
return err
}
*c = kind
return nil
}
func columnKindFromSchema(kind string) (ColumnKind, error) {
switch kind {
case "partition_key":
return ColumnPartitionKey, nil
case "clustering_key", "clustering":
return ColumnClusteringKey, nil
case "regular":
return ColumnRegular, nil
case "compact_value":
return ColumnCompact, nil
case "static":
return ColumnStatic, nil
default:
return -1, fmt.Errorf("unknown column kind: %q", kind)
}
}
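These helpers are package-internal; an in-package sketch of the mapping, assuming the definitions above:
```go
kind, err := columnKindFromSchema("clustering_key")
if err != nil {
	log.Fatal(err) // unknown kinds (e.g. a future schema value) are errors
}
fmt.Println(kind) // prints "clustering_key" via ColumnKind.String
```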
// default alias values
const (
DEFAULT_KEY_ALIAS = "key"
@@ -243,7 +294,7 @@ func compileV1Metadata(tables []TableMetadata) {
Table: table.Name,
Name: alias,
Type: keyValidatorParsed.types[i],
Kind: PARTITION_KEY,
Kind: ColumnPartitionKey,
ComponentIndex: i,
}
@@ -288,7 +339,7 @@ func compileV1Metadata(tables []TableMetadata) {
Name: alias,
Type: comparatorParsed.types[i],
Order: order,
Kind: CLUSTERING_KEY,
Kind: ColumnClusteringKey,
ComponentIndex: i,
}
@@ -308,7 +359,7 @@ func compileV1Metadata(tables []TableMetadata) {
Table: table.Name,
Name: alias,
Type: defaultValidatorParsed.types[0],
Kind: REGULAR,
Kind: ColumnRegular,
}
table.Columns[alias] = column
}
@@ -320,22 +371,22 @@ func compileV2Metadata(tables []TableMetadata) {
for i := range tables {
table := &tables[i]
clusteringColumnCount := componentColumnCountOfType(table.Columns, CLUSTERING_KEY)
clusteringColumnCount := componentColumnCountOfType(table.Columns, ColumnClusteringKey)
table.ClusteringColumns = make([]*ColumnMetadata, clusteringColumnCount)
if table.KeyValidator != "" {
keyValidatorParsed := parseType(table.KeyValidator)
table.PartitionKey = make([]*ColumnMetadata, len(keyValidatorParsed.types))
} else { // Cassandra 3.x+
partitionKeyCount := componentColumnCountOfType(table.Columns, PARTITION_KEY)
partitionKeyCount := componentColumnCountOfType(table.Columns, ColumnPartitionKey)
table.PartitionKey = make([]*ColumnMetadata, partitionKeyCount)
}
for _, columnName := range table.OrderedColumns {
column := table.Columns[columnName]
if column.Kind == PARTITION_KEY {
if column.Kind == ColumnPartitionKey {
table.PartitionKey[column.ComponentIndex] = column
} else if column.Kind == CLUSTERING_KEY {
} else if column.Kind == ColumnClusteringKey {
table.ClusteringColumns[column.ComponentIndex] = column
}
}
@@ -343,7 +394,7 @@ func compileV2Metadata(tables []TableMetadata) {
}
// returns the count of columns with the given "kind" value.
func componentColumnCountOfType(columns map[string]*ColumnMetadata, kind string) int {
func componentColumnCountOfType(columns map[string]*ColumnMetadata, kind ColumnKind) int {
maxComponentIndex := -1
for _, column := range columns {
if column.Kind == kind && column.ComponentIndex > maxComponentIndex {
@@ -541,19 +592,11 @@ func getTableMetadata(session *Session, keyspaceName string) ([]TableMetadata, e
return tables, nil
}
// query for only the column metadata in the specified keyspace from system.schema_columns
func getColumnMetadata(
session *Session,
keyspaceName string,
) ([]ColumnMetadata, error) {
// Deal with differences in protocol versions
var stmt string
var scan func(*Iter, *ColumnMetadata, *[]byte) bool
if session.cfg.ProtoVersion == 1 {
// V1 does not support the type column, and all returned rows are
// of kind "regular".
stmt = `
SELECT
func (s *Session) scanColumnMetadataV1(keyspace string) ([]ColumnMetadata, error) {
// V1 does not support the type column, and all returned rows are
// of kind "regular".
const stmt = `
SELECT
columnfamily_name,
column_name,
component_index,
@@ -562,54 +605,57 @@ func getColumnMetadata(
index_type,
index_options
FROM system.schema_columns
WHERE keyspace_name = ?
`
scan = func(
iter *Iter,
column *ColumnMetadata,
indexOptionsJSON *[]byte,
) bool {
// all columns returned by V1 are regular
column.Kind = REGULAR
return iter.Scan(
&column.Table,
&column.Name,
&column.ComponentIndex,
&column.Validator,
&column.Index.Name,
&column.Index.Type,
&indexOptionsJSON,
)
WHERE keyspace_name = ?`
var columns []ColumnMetadata
rows := s.control.query(stmt, keyspace).Scanner()
for rows.Next() {
var (
column = ColumnMetadata{Keyspace: keyspace}
indexOptionsJSON []byte
)
// all columns returned by V1 are regular
column.Kind = ColumnRegular
err := rows.Scan(&column.Table,
&column.Name,
&column.ComponentIndex,
&column.Validator,
&column.Index.Name,
&column.Index.Type,
&indexOptionsJSON)
if err != nil {
return nil, err
}
} else if session.useSystemSchema { // Cassandra 3.x+
stmt = `
SELECT
table_name,
column_name,
clustering_order,
type,
kind,
position
FROM system_schema.columns
WHERE keyspace_name = ?
`
scan = func(
iter *Iter,
column *ColumnMetadata,
indexOptionsJSON *[]byte,
) bool {
return iter.Scan(
&column.Table,
&column.Name,
&column.ClusteringOrder,
&column.Validator,
&column.Kind,
&column.ComponentIndex,
)
if len(indexOptionsJSON) > 0 {
err := json.Unmarshal(indexOptionsJSON, &column.Index.Options)
if err != nil {
return nil, fmt.Errorf(
"Invalid JSON value '%s' as index_options for column '%s' in table '%s': %v",
indexOptionsJSON,
column.Name,
column.Table,
err)
}
}
} else {
// V2+ supports the type column
stmt = `
columns = append(columns, column)
}
if err := rows.Err(); err != nil {
return nil, err
}
return columns, nil
}
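All three scanColumnMetadata* helpers now share the same shape, replacing the old per-version scan closure: issue the statement, walk it with gocql's Scanner (Next/Scan/Err, as the diff itself uses), and collect rows. A minimal sketch of that loop in isolation, with a single placeholder column instead of the real per-version column lists:

// Sketch of the shared read loop: advance with Next, decode with Scan,
// then surface whatever error ended the iteration via Err.
func scanColumnNames(iter *Iter) ([]string, error) {
	var names []string
	scanner := iter.Scanner()
	for scanner.Next() {
		var name string
		if err := scanner.Scan(&name); err != nil {
			return nil, err
		}
		names = append(names, name)
	}
	if err := scanner.Err(); err != nil {
		return nil, err
	}
	return names, nil
}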
func (s *Session) scanColumnMetadataV2(keyspace string) ([]ColumnMetadata, error) {
// V2+ supports the type column
const stmt = `
SELECT
columnfamily_name,
column_name,
@@ -620,57 +666,112 @@ func getColumnMetadata(
index_options,
type
FROM system.schema_columns
WHERE keyspace_name = ?
`
scan = func(
iter *Iter,
column *ColumnMetadata,
indexOptionsJSON *[]byte,
) bool {
return iter.Scan(
&column.Table,
&column.Name,
&column.ComponentIndex,
&column.Validator,
&column.Index.Name,
&column.Index.Type,
&indexOptionsJSON,
&column.Kind,
)
WHERE keyspace_name = ?`
var columns []ColumnMetadata
rows := s.control.query(stmt, keyspace).Scanner()
for rows.Next() {
var (
column = ColumnMetadata{Keyspace: keyspace}
indexOptionsJSON []byte
)
err := rows.Scan(&column.Table,
&column.Name,
&column.ComponentIndex,
&column.Validator,
&column.Index.Name,
&column.Index.Type,
&indexOptionsJSON,
&column.Kind,
)
if err != nil {
return nil, err
}
}
// get the columns metadata
columns := []ColumnMetadata{}
column := ColumnMetadata{Keyspace: keyspaceName}
var indexOptionsJSON []byte
iter := session.control.query(stmt, keyspaceName)
for scan(iter, &column, &indexOptionsJSON) {
var err error
// decode the index options
if indexOptionsJSON != nil {
err = json.Unmarshal(indexOptionsJSON, &column.Index.Options)
if len(indexOptionsJSON) > 0 {
err := json.Unmarshal(indexOptionsJSON, &column.Index.Options)
if err != nil {
iter.Close()
return nil, fmt.Errorf(
"Invalid JSON value '%s' as index_options for column '%s' in table '%s': %v",
indexOptionsJSON,
column.Name,
column.Table,
err,
)
err)
}
}
columns = append(columns, column)
column = ColumnMetadata{Keyspace: keyspaceName}
}
err := iter.Close()
if err := rows.Err(); err != nil {
return nil, err
}
return columns, nil
}
func (s *Session) scanColumnMetadataSystem(keyspace string) ([]ColumnMetadata, error) {
const stmt = `
SELECT
table_name,
column_name,
clustering_order,
type,
kind,
position
FROM system_schema.columns
WHERE keyspace_name = ?`
var columns []ColumnMetadata
rows := s.control.query(stmt, keyspace).Scanner()
for rows.Next() {
column := ColumnMetadata{Keyspace: keyspace}
err := rows.Scan(&column.Table,
&column.Name,
&column.ClusteringOrder,
&column.Validator,
&column.Kind,
&column.ComponentIndex,
)
if err != nil {
return nil, err
}
columns = append(columns, column)
}
if err := rows.Err(); err != nil {
return nil, err
}
// TODO(zariel): get column index info from system_schema.indexes
return columns, nil
}
// query for only the column metadata in the specified keyspace
func getColumnMetadata(session *Session, keyspaceName string) ([]ColumnMetadata, error) {
var (
columns []ColumnMetadata
err error
)
// Deal with differences in protocol versions
if session.cfg.ProtoVersion == 1 {
columns, err = session.scanColumnMetadataV1(keyspaceName)
} else if session.useSystemSchema { // Cassandra 3.x+
columns, err = session.scanColumnMetadataSystem(keyspaceName)
} else {
columns, err = session.scanColumnMetadataV2(keyspaceName)
}
if err != nil && err != ErrNotFound {
return nil, fmt.Errorf("Error querying column schema: %v", err)
}
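The file view is truncated here, but the error handling above already tells the story: ErrNotFound is tolerated so a keyspace with no columns comes back empty rather than failing. Presumably the function ends by returning what was collected; a hedged sketch of that tail:

	// Assumed continuation (not shown in this view): err == ErrNotFound
	// falls through with a nil slice, so callers see "no columns".
	return columns, nil
}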

View File

@@ -7,6 +7,7 @@ package gocql
import (
"fmt"
"log"
"net"
"sync"
"sync/atomic"
@@ -90,7 +91,7 @@ func (c *cowHostList) update(host *HostInfo) {
c.mu.Unlock()
}
func (c *cowHostList) remove(addr string) bool {
func (c *cowHostList) remove(ip net.IP) bool {
c.mu.Lock()
l := c.get()
size := len(l)
@@ -102,7 +103,7 @@ func (c *cowHostList) remove(addr string) bool {
found := false
newL := make([]*HostInfo, 0, size)
for i := 0; i < len(l); i++ {
if l[i].Peer() != addr {
if !l[i].Peer().Equal(ip) {
newL = append(newL, l[i])
} else {
found = true
@@ -161,9 +162,9 @@ func (s *SimpleRetryPolicy) Attempt(q RetryableQuery) bool {
type HostStateNotifier interface {
AddHost(host *HostInfo)
RemoveHost(addr string)
RemoveHost(host *HostInfo)
HostUp(host *HostInfo)
HostDown(addr string)
HostDown(host *HostInfo)
}
// HostSelectionPolicy is an interface for selecting
@@ -235,16 +236,16 @@ func (r *roundRobinHostPolicy) AddHost(host *HostInfo) {
r.hosts.add(host)
}
func (r *roundRobinHostPolicy) RemoveHost(addr string) {
r.hosts.remove(addr)
func (r *roundRobinHostPolicy) RemoveHost(host *HostInfo) {
r.hosts.remove(host.Peer())
}
func (r *roundRobinHostPolicy) HostUp(host *HostInfo) {
r.AddHost(host)
}
func (r *roundRobinHostPolicy) HostDown(addr string) {
r.RemoveHost(addr)
func (r *roundRobinHostPolicy) HostDown(host *HostInfo) {
r.RemoveHost(host)
}
// TokenAwareHostPolicy is a token aware host selection policy, where hosts are
@@ -278,9 +279,9 @@ func (t *tokenAwareHostPolicy) AddHost(host *HostInfo) {
t.resetTokenRing()
}
func (t *tokenAwareHostPolicy) RemoveHost(addr string) {
t.hosts.remove(addr)
t.fallback.RemoveHost(addr)
func (t *tokenAwareHostPolicy) RemoveHost(host *HostInfo) {
t.hosts.remove(host.Peer())
t.fallback.RemoveHost(host)
t.resetTokenRing()
}
@@ -289,8 +290,8 @@ func (t *tokenAwareHostPolicy) HostUp(host *HostInfo) {
t.AddHost(host)
}
func (t *tokenAwareHostPolicy) HostDown(addr string) {
t.RemoveHost(addr)
func (t *tokenAwareHostPolicy) HostDown(host *HostInfo) {
t.RemoveHost(host)
}
func (t *tokenAwareHostPolicy) resetTokenRing() {
@@ -393,8 +394,9 @@ func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) {
hostMap := make(map[string]*HostInfo, len(hosts))
for i, host := range hosts {
peers[i] = host.Peer()
hostMap[host.Peer()] = host
ip := host.Peer().String()
peers[i] = ip
hostMap[ip] = host
}
r.mu.Lock()
@@ -404,15 +406,17 @@ func (r *hostPoolHostPolicy) SetHosts(hosts []*HostInfo) {
}
func (r *hostPoolHostPolicy) AddHost(host *HostInfo) {
ip := host.Peer().String()
r.mu.Lock()
defer r.mu.Unlock()
// If the host addr is present and isn't nil, return
if h, ok := r.hostMap[host.Peer()]; ok && h != nil{
if h, ok := r.hostMap[ip]; ok && h != nil {
return
}
// otherwise, add the host to the map
r.hostMap[host.Peer()] = host
r.hostMap[ip] = host
// and construct a new peer list to give to the HostPool
hosts := make([]string, 0, len(r.hostMap))
for addr := range r.hostMap {
@@ -420,21 +424,22 @@ func (r *hostPoolHostPolicy) AddHost(host *HostInfo) {
}
r.hp.SetHosts(hosts)
}
func (r *hostPoolHostPolicy) RemoveHost(addr string) {
func (r *hostPoolHostPolicy) RemoveHost(host *HostInfo) {
ip := host.Peer().String()
r.mu.Lock()
defer r.mu.Unlock()
if _, ok := r.hostMap[addr]; !ok {
if _, ok := r.hostMap[ip]; !ok {
return
}
delete(r.hostMap, addr)
delete(r.hostMap, ip)
hosts := make([]string, 0, len(r.hostMap))
for addr := range r.hostMap {
hosts = append(hosts, addr)
for _, host := range r.hostMap {
hosts = append(hosts, host.Peer().String())
}
r.hp.SetHosts(hosts)
@@ -444,8 +449,8 @@ func (r *hostPoolHostPolicy) HostUp(host *HostInfo) {
r.AddHost(host)
}
func (r *hostPoolHostPolicy) HostDown(addr string) {
r.RemoveHost(addr)
func (r *hostPoolHostPolicy) HostDown(host *HostInfo) {
r.RemoveHost(host)
}
func (r *hostPoolHostPolicy) SetPartitioner(partitioner string) {
@@ -488,10 +493,12 @@ func (host selectedHostPoolHost) Info() *HostInfo {
}
func (host selectedHostPoolHost) Mark(err error) {
ip := host.info.Peer().String()
host.policy.mu.RLock()
defer host.policy.mu.RUnlock()
if _, ok := host.policy.hostMap[host.info.Peer()]; !ok {
if _, ok := host.policy.hostMap[ip]; !ok {
// host was removed between pick and mark
return
}
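The thread running through all of these policy changes: hosts are identified by net.IP rather than a pre-formatted address string. Because net.IP is a byte slice, it is not a valid Go map key, so hostPoolHostPolicy keys its map by host.Peer().String(), while slice membership checks such as cowHostList.remove compare with net.IP.Equal. A small self-contained illustration of both idioms:

package main

import (
	"fmt"
	"net"
)

func main() {
	ip := net.ParseIP("10.0.0.5")

	// Slice membership: compare with Equal, as cowHostList.remove does.
	peers := []net.IP{net.ParseIP("10.0.0.4"), net.ParseIP("10.0.0.5")}
	for _, p := range peers {
		if p.Equal(ip) {
			fmt.Println("found", p)
		}
	}

	// Map keying: net.IP is a []byte and not comparable, so maps are
	// keyed by the string form, as hostPoolHostPolicy does.
	seen := map[string]bool{ip.String(): true}
	fmt.Println(seen["10.0.0.5"]) // true
}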

View File

@@ -28,7 +28,7 @@ func (q *queryExecutor) executeQuery(qry ExecutableQuery) (*Iter, error) {
continue
}
pool, ok := q.pool.getPool(host.Peer())
pool, ok := q.pool.getPool(host)
if !ok {
continue
}

View File

@@ -1,6 +1,7 @@
package gocql
import (
"net"
"sync"
"sync/atomic"
)
@@ -9,7 +10,7 @@ type ring struct {
// endpoints are the set of endpoints which the driver will attempt to connect
// to in case it cannot reach any of its hosts. They are also used to
// bootstrap the initial connection.
endpoints []string
endpoints []*HostInfo
// hosts are the set of all hosts in the cassandra ring that we know of
mu sync.RWMutex
@@ -34,9 +35,9 @@ func (r *ring) rrHost() *HostInfo {
return r.hostList[pos%len(r.hostList)]
}
func (r *ring) getHost(addr string) *HostInfo {
func (r *ring) getHost(ip net.IP) *HostInfo {
r.mu.RLock()
host := r.hosts[addr]
host := r.hosts[ip.String()]
r.mu.RUnlock()
return host
}
@@ -52,42 +53,66 @@ func (r *ring) allHosts() []*HostInfo {
}
func (r *ring) addHost(host *HostInfo) bool {
ip := host.Peer().String()
r.mu.Lock()
if r.hosts == nil {
r.hosts = make(map[string]*HostInfo)
}
addr := host.Peer()
_, ok := r.hosts[addr]
r.hosts[addr] = host
_, ok := r.hosts[ip]
if !ok {
r.hostList = append(r.hostList, host)
}
r.hosts[ip] = host
r.mu.Unlock()
return ok
}
func (r *ring) addOrUpdate(host *HostInfo) *HostInfo {
if existingHost, ok := r.addHostIfMissing(host); ok {
existingHost.update(host)
host = existingHost
}
return host
}
func (r *ring) addHostIfMissing(host *HostInfo) (*HostInfo, bool) {
ip := host.Peer().String()
r.mu.Lock()
if r.hosts == nil {
r.hosts = make(map[string]*HostInfo)
}
addr := host.Peer()
existing, ok := r.hosts[addr]
existing, ok := r.hosts[ip]
if !ok {
r.hosts[addr] = host
r.hosts[ip] = host
existing = host
r.hostList = append(r.hostList, host)
}
r.mu.Unlock()
return existing, ok
}
func (r *ring) removeHost(addr string) bool {
func (r *ring) removeHost(ip net.IP) bool {
r.mu.Lock()
if r.hosts == nil {
r.hosts = make(map[string]*HostInfo)
}
_, ok := r.hosts[addr]
delete(r.hosts, addr)
k := ip.String()
_, ok := r.hosts[k]
if ok {
for i, host := range r.hostList {
if host.Peer().Equal(ip) {
r.hostList = append(r.hostList[:i], r.hostList[i+1:]...)
break
}
}
}
delete(r.hosts, k)
r.mu.Unlock()
return ok
}
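The ring now maintains two views of the same hosts: a map keyed by ip.String() for O(1) lookup and the hostList slice for stable round-robin order, both mutated together under the lock, as addHost and removeHost above do. A compact sketch of the same bookkeeping pattern (registry and its field names are illustrative, not gocql's; imports of net and sync assumed, with gocql's HostInfo in scope):

// Illustrative map-plus-slice registry: the map answers "is this IP
// known?" in O(1); the slice keeps a stable iteration order.
type registry struct {
	mu    sync.Mutex           // guards both views
	byIP  map[string]*HostInfo // keyed by ip.String(), since net.IP can't key a map
	order []*HostInfo          // insertion order for round-robin
}

func (r *registry) remove(ip net.IP) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	k := ip.String()
	if _, ok := r.byIP[k]; !ok {
		return false
	}
	delete(r.byIP, k)
	for i, h := range r.order {
		if h.Peer().Equal(ip) {
			r.order = append(r.order[:i], r.order[i+1:]...)
			break
		}
	}
	return true
}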

Some files were not shown because too many files have changed in this diff.