mirror of
https://github.com/optim-enterprises-bv/vault.git
synced 2025-11-26 14:55:01 +00:00
Bump deps
This commit is contained in:
2
vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go
generated
vendored
2
vendor/github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute/decode.go
generated
vendored
@@ -241,7 +241,7 @@ func (d *Decoder) decodeBinary(b []byte, v reflect.Value) error {
|
||||
func (d *Decoder) decodeBool(b *bool, v reflect.Value) error {
|
||||
switch v.Kind() {
|
||||
case reflect.Bool, reflect.Interface:
|
||||
v.Set(reflect.ValueOf(*b))
|
||||
v.Set(reflect.ValueOf(*b).Convert(v.Type()))
|
||||
default:
|
||||
return &UnmarshalTypeError{Value: "bool", Type: v.Type()}
|
||||
}
|
||||
|
||||
163
vendor/github.com/circonus-labs/circonus-gometrics/api/README.md
generated
vendored
Normal file
163
vendor/github.com/circonus-labs/circonus-gometrics/api/README.md
generated
vendored
Normal file
@@ -0,0 +1,163 @@
|
||||
## Circonus API package
|
||||
|
||||
Full api documentation (for using *this* package) is available at [godoc.org](https://godoc.org/github.com/circonus-labs/circonus-gometrics/api). Links in the lists below go directly to the generic Circonus API documentation for the endpoint.
|
||||
|
||||
### Straight [raw] API access
|
||||
|
||||
* Get
|
||||
* Post (for creates)
|
||||
* Put (for updates)
|
||||
* Delete
|
||||
|
||||
### Helpers for currently supported API endpoints
|
||||
|
||||
> Note, these interfaces are still being actively developed. For example, many of the `New*` methods only return an empty struct; sensible defaults will be added going forward. Other, common helper methods for the various endpoints may be added as use cases emerge. The organization
|
||||
of the API may change if common use contexts would benefit significantly.
|
||||
|
||||
* [Account](https://login.circonus.com/resources/api/calls/account)
|
||||
* FetchAccount
|
||||
* FetchAccounts
|
||||
* UpdateAccount
|
||||
* SearchAccounts
|
||||
* [Acknowledgement](https://login.circonus.com/resources/api/calls/acknowledgement)
|
||||
* NewAcknowledgement
|
||||
* FetchAcknowledgement
|
||||
* FetchAcknowledgements
|
||||
* UpdateAcknowledgement
|
||||
* CreateAcknowledgement
|
||||
* DeleteAcknowledgement
|
||||
* DeleteAcknowledgementByCID
|
||||
* SearchAcknowledgements
|
||||
* [Alert](https://login.circonus.com/resources/api/calls/alert)
|
||||
* FetchAlert
|
||||
* FetchAlerts
|
||||
* SearchAlerts
|
||||
* [Annotation](https://login.circonus.com/resources/api/calls/annotation)
|
||||
* NewAnnotation
|
||||
* FetchAnnotation
|
||||
* FetchAnnotations
|
||||
* UpdateAnnotation
|
||||
* CreateAnnotation
|
||||
* DeleteAnnotation
|
||||
* DeleteAnnotationByCID
|
||||
* SearchAnnotations
|
||||
* [Broker](https://login.circonus.com/resources/api/calls/broker)
|
||||
* FetchBroker
|
||||
* FetchBrokers
|
||||
* SearchBrokers
|
||||
* [Check Bundle](https://login.circonus.com/resources/api/calls/check_bundle)
|
||||
* NewCheckBundle
|
||||
* FetchCheckBundle
|
||||
* FetchCheckBundles
|
||||
* UpdateCheckBundle
|
||||
* CreateCheckBundle
|
||||
* DeleteCheckBundle
|
||||
* DeleteCheckBundleByCID
|
||||
* SearchCheckBundles
|
||||
* [Check Bundle Metrics](https://login.circonus.com/resources/api/calls/check_bundle_metrics)
|
||||
* FetchCheckBundleMetrics
|
||||
* UpdateCheckBundleMetrics
|
||||
* [Check](https://login.circonus.com/resources/api/calls/check)
|
||||
* FetchCheck
|
||||
* FetchChecks
|
||||
* SearchChecks
|
||||
* [Contact Group](https://login.circonus.com/resources/api/calls/contact_group)
|
||||
* NewContactGroup
|
||||
* FetchContactGroup
|
||||
* FetchContactGroups
|
||||
* UpdateContactGroup
|
||||
* CreateContactGroup
|
||||
* DeleteContactGroup
|
||||
* DeleteContactGroupByCID
|
||||
* SearchContactGroups
|
||||
* [Dashboard](https://login.circonus.com/resources/api/calls/dashboard) -- note, this is a work in progress, the methods/types may still change
|
||||
* NewDashboard
|
||||
* FetchDashboard
|
||||
* FetchDashboards
|
||||
* UpdateDashboard
|
||||
* CreateDashboard
|
||||
* DeleteDashboard
|
||||
* DeleteDashboardByCID
|
||||
* SearchDashboards
|
||||
* [Graph](https://login.circonus.com/resources/api/calls/graph)
|
||||
* NewGraph
|
||||
* FetchGraph
|
||||
* FetchGraphs
|
||||
* UpdateGraph
|
||||
* CreateGraph
|
||||
* DeleteGraph
|
||||
* DeleteGraphByCID
|
||||
* SearchGraphs
|
||||
* [Metric Cluster](https://login.circonus.com/resources/api/calls/metric_cluster)
|
||||
* NewMetricCluster
|
||||
* FetchMetricCluster
|
||||
* FetchMetricClusters
|
||||
* UpdateMetricCluster
|
||||
* CreateMetricCluster
|
||||
* DeleteMetricCluster
|
||||
* DeleteMetricClusterByCID
|
||||
* SearchMetricClusters
|
||||
* [Metric](https://login.circonus.com/resources/api/calls/metric)
|
||||
* FetchMetric
|
||||
* FetchMetrics
|
||||
* UpdateMetric
|
||||
* SearchMetrics
|
||||
* [Maintenance window](https://login.circonus.com/resources/api/calls/maintenance)
|
||||
* NewMaintenanceWindow
|
||||
* FetchMaintenanceWindow
|
||||
* FetchMaintenanceWindows
|
||||
* UpdateMaintenanceWindow
|
||||
* CreateMaintenanceWindow
|
||||
* DeleteMaintenanceWindow
|
||||
* DeleteMaintenanceWindowByCID
|
||||
* SearchMaintenanceWindows
|
||||
* [Outlier Report](https://login.circonus.com/resources/api/calls/outlier_report)
|
||||
* NewOutlierReport
|
||||
* FetchOutlierReport
|
||||
* FetchOutlierReports
|
||||
* UpdateOutlierReport
|
||||
* CreateOutlierReport
|
||||
* DeleteOutlierReport
|
||||
* DeleteOutlierReportByCID
|
||||
* SearchOutlierReports
|
||||
* [Provision Broker](https://login.circonus.com/resources/api/calls/provision_broker)
|
||||
* NewProvisionBroker
|
||||
* FetchProvisionBroker
|
||||
* UpdateProvisionBroker
|
||||
* CreateProvisionBroker
|
||||
* [Rule Set](https://login.circonus.com/resources/api/calls/rule_set)
|
||||
* NewRuleset
|
||||
* FetchRuleset
|
||||
* FetchRulesets
|
||||
* UpdateRuleset
|
||||
* CreateRuleset
|
||||
* DeleteRuleset
|
||||
* DeleteRulesetByCID
|
||||
* SearchRulesets
|
||||
* [Rule Set Group](https://login.circonus.com/resources/api/calls/rule_set_group)
|
||||
* NewRulesetGroup
|
||||
* FetchRulesetGroup
|
||||
* FetchRulesetGroups
|
||||
* UpdateRulesetGroup
|
||||
* CreateRulesetGroup
|
||||
* DeleteRulesetGroup
|
||||
* DeleteRulesetGroupByCID
|
||||
* SearchRulesetGroups
|
||||
* [User](https://login.circonus.com/resources/api/calls/user)
|
||||
* FetchUser
|
||||
* FetchUsers
|
||||
* UpdateUser
|
||||
* SearchUsers
|
||||
* [Worksheet](https://login.circonus.com/resources/api/calls/worksheet)
|
||||
* NewWorksheet
|
||||
* FetchWorksheet
|
||||
* FetchWorksheets
|
||||
* UpdateWorksheet
|
||||
* CreateWorksheet
|
||||
* DeleteWorksheet
|
||||
* DeleteWorksheetByCID
|
||||
* SearchWorksheets
|
||||
|
||||
---
|
||||
|
||||
Unless otherwise noted, the source files are distributed under the BSD-style license found in the LICENSE file.
|
||||
181
vendor/github.com/circonus-labs/circonus-gometrics/api/account.go
generated
vendored
Normal file
181
vendor/github.com/circonus-labs/circonus-gometrics/api/account.go
generated
vendored
Normal file
@@ -0,0 +1,181 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Account API support - Fetch and Update
|
||||
// See: https://login.circonus.com/resources/api/calls/account
|
||||
// Note: Create and Delete are not supported for Accounts via the API
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// AccountLimit defines a usage limit imposed on account
|
||||
type AccountLimit struct {
|
||||
Limit uint `json:"_limit,omitempty"` // uint >=0
|
||||
Type string `json:"_type,omitempty"` // string
|
||||
Used uint `json:"_used,omitempty"` // uint >=0
|
||||
}
|
||||
|
||||
// AccountInvite defines outstanding invites
|
||||
type AccountInvite struct {
|
||||
Email string `json:"email"` // string
|
||||
Role string `json:"role"` // string
|
||||
}
|
||||
|
||||
// AccountUser defines current users
|
||||
type AccountUser struct {
|
||||
Role string `json:"role"` // string
|
||||
UserCID string `json:"user"` // string
|
||||
}
|
||||
|
||||
// Account defines an account. See https://login.circonus.com/resources/api/calls/account for more information.
|
||||
type Account struct {
|
||||
Address1 *string `json:"address1,omitempty"` // string or null
|
||||
Address2 *string `json:"address2,omitempty"` // string or null
|
||||
CCEmail *string `json:"cc_email,omitempty"` // string or null
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
City *string `json:"city,omitempty"` // string or null
|
||||
ContactGroups []string `json:"_contact_groups,omitempty"` // [] len >= 0
|
||||
Country string `json:"country_code,omitempty"` // string
|
||||
Description *string `json:"description,omitempty"` // string or null
|
||||
Invites []AccountInvite `json:"invites,omitempty"` // [] len >= 0
|
||||
Name string `json:"name,omitempty"` // string
|
||||
OwnerCID string `json:"_owner,omitempty"` // string
|
||||
StateProv *string `json:"state_prov,omitempty"` // string or null
|
||||
Timezone string `json:"timezone,omitempty"` // string
|
||||
UIBaseURL string `json:"_ui_base_url,omitempty"` // string
|
||||
Usage []AccountLimit `json:"_usage,omitempty"` // [] len >= 0
|
||||
Users []AccountUser `json:"users,omitempty"` // [] len >= 0
|
||||
}
|
||||
|
||||
// FetchAccount retrieves account with passed cid. Pass nil for '/account/current'.
|
||||
func (a *API) FetchAccount(cid CIDType) (*Account, error) {
|
||||
var accountCID string
|
||||
|
||||
if cid == nil || *cid == "" {
|
||||
accountCID = config.AccountPrefix + "/current"
|
||||
} else {
|
||||
accountCID = string(*cid)
|
||||
}
|
||||
|
||||
matched, err := regexp.MatchString(config.AccountCIDRegex, accountCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid account CID [%s]", accountCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(accountCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] account fetch, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
account := new(Account)
|
||||
if err := json.Unmarshal(result, account); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// FetchAccounts retrieves all accounts available to the API Token.
|
||||
func (a *API) FetchAccounts() (*[]Account, error) {
|
||||
result, err := a.Get(config.AccountPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var accounts []Account
|
||||
if err := json.Unmarshal(result, &accounts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &accounts, nil
|
||||
}
|
||||
|
||||
// UpdateAccount updates passed account.
|
||||
func (a *API) UpdateAccount(cfg *Account) (*Account, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid account config [nil]")
|
||||
}
|
||||
|
||||
accountCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.AccountCIDRegex, accountCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid account CID [%s]", accountCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] account update, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(accountCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
account := &Account{}
|
||||
if err := json.Unmarshal(result, account); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return account, nil
|
||||
}
|
||||
|
||||
// SearchAccounts returns accounts matching a filter (search queries are not
|
||||
// suppoted by the account endpoint). Pass nil as filter for all accounts the
|
||||
// API Token can access.
|
||||
func (a *API) SearchAccounts(filterCriteria *SearchFilterType) (*[]Account, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchAccounts()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.AccountPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var accounts []Account
|
||||
if err := json.Unmarshal(result, &accounts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &accounts, nil
|
||||
}
|
||||
190
vendor/github.com/circonus-labs/circonus-gometrics/api/acknowledgement.go
generated
vendored
Normal file
190
vendor/github.com/circonus-labs/circonus-gometrics/api/acknowledgement.go
generated
vendored
Normal file
@@ -0,0 +1,190 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Acknowledgement API support - Fetch, Create, Update, Delete*, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/acknowledgement
|
||||
// * : delete (cancel) by updating with AcknowledgedUntil set to 0
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// Acknowledgement defines a acknowledgement. See https://login.circonus.com/resources/api/calls/acknowledgement for more information.
|
||||
type Acknowledgement struct {
|
||||
AcknowledgedBy string `json:"_acknowledged_by,omitempty"` // string
|
||||
AcknowledgedOn uint `json:"_acknowledged_on,omitempty"` // uint
|
||||
AcknowledgedUntil interface{} `json:"acknowledged_until,omitempty"` // NOTE received as uint; can be set using string or uint
|
||||
Active bool `json:"_active,omitempty"` // bool
|
||||
AlertCID string `json:"alert,omitempty"` // string
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
LastModified uint `json:"_last_modified,omitempty"` // uint
|
||||
LastModifiedBy string `json:"_last_modified_by,omitempty"` // string
|
||||
Notes string `json:"notes,omitempty"` // string
|
||||
}
|
||||
|
||||
// NewAcknowledgement returns new Acknowledgement (with defaults, if applicable).
|
||||
func NewAcknowledgement() *Acknowledgement {
|
||||
return &Acknowledgement{}
|
||||
}
|
||||
|
||||
// FetchAcknowledgement retrieves acknowledgement with passed cid.
|
||||
func (a *API) FetchAcknowledgement(cid CIDType) (*Acknowledgement, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid acknowledgement CID [none]")
|
||||
}
|
||||
|
||||
acknowledgementCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.AcknowledgementCIDRegex, acknowledgementCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid acknowledgement CID [%s]", acknowledgementCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(acknowledgementCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] acknowledgement fetch, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
acknowledgement := &Acknowledgement{}
|
||||
if err := json.Unmarshal(result, acknowledgement); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return acknowledgement, nil
|
||||
}
|
||||
|
||||
// FetchAcknowledgements retrieves all acknowledgements available to the API Token.
|
||||
func (a *API) FetchAcknowledgements() (*[]Acknowledgement, error) {
|
||||
result, err := a.Get(config.AcknowledgementPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var acknowledgements []Acknowledgement
|
||||
if err := json.Unmarshal(result, &acknowledgements); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &acknowledgements, nil
|
||||
}
|
||||
|
||||
// UpdateAcknowledgement updates passed acknowledgement.
|
||||
func (a *API) UpdateAcknowledgement(cfg *Acknowledgement) (*Acknowledgement, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid acknowledgement config [nil]")
|
||||
}
|
||||
|
||||
acknowledgementCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.AcknowledgementCIDRegex, acknowledgementCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid acknowledgement CID [%s]", acknowledgementCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] acknowledgement update, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(acknowledgementCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
acknowledgement := &Acknowledgement{}
|
||||
if err := json.Unmarshal(result, acknowledgement); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return acknowledgement, nil
|
||||
}
|
||||
|
||||
// CreateAcknowledgement creates a new acknowledgement.
|
||||
func (a *API) CreateAcknowledgement(cfg *Acknowledgement) (*Acknowledgement, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid acknowledgement config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result, err := a.Post(config.AcknowledgementPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] acknowledgement create, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
acknowledgement := &Acknowledgement{}
|
||||
if err := json.Unmarshal(result, acknowledgement); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return acknowledgement, nil
|
||||
}
|
||||
|
||||
// SearchAcknowledgements returns acknowledgements matching
|
||||
// the specified search query and/or filter. If nil is passed for
|
||||
// both parameters all acknowledgements will be returned.
|
||||
func (a *API) SearchAcknowledgements(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Acknowledgement, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchAcknowledgements()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.AcknowledgementPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var acknowledgements []Acknowledgement
|
||||
if err := json.Unmarshal(result, &acknowledgements); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &acknowledgements, nil
|
||||
}
|
||||
131
vendor/github.com/circonus-labs/circonus-gometrics/api/alert.go
generated
vendored
Normal file
131
vendor/github.com/circonus-labs/circonus-gometrics/api/alert.go
generated
vendored
Normal file
@@ -0,0 +1,131 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Alert API support - Fetch and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/alert
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// Alert defines a alert. See https://login.circonus.com/resources/api/calls/alert for more information.
|
||||
type Alert struct {
|
||||
AcknowledgementCID *string `json:"_acknowledgement,omitempty"` // string or null
|
||||
AlertURL string `json:"_alert_url,omitempty"` // string
|
||||
BrokerCID string `json:"_broker,omitempty"` // string
|
||||
CheckCID string `json:"_check,omitempty"` // string
|
||||
CheckName string `json:"_check_name,omitempty"` // string
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
ClearedOn *uint `json:"_cleared_on,omitempty"` // uint or null
|
||||
ClearedValue *string `json:"_cleared_value,omitempty"` // string or null
|
||||
Maintenance []string `json:"_maintenance,omitempty"` // [] len >= 0
|
||||
MetricLinkURL *string `json:"_metric_link,omitempty"` // string or null
|
||||
MetricName string `json:"_metric_name,omitempty"` // string
|
||||
MetricNotes *string `json:"_metric_notes,omitempty"` // string or null
|
||||
OccurredOn uint `json:"_occurred_on,omitempty"` // uint
|
||||
RuleSetCID string `json:"_rule_set,omitempty"` // string
|
||||
Severity uint `json:"_severity,omitempty"` // uint
|
||||
Tags []string `json:"_tags,omitempty"` // [] len >= 0
|
||||
Value string `json:"_value,omitempty"` // string
|
||||
}
|
||||
|
||||
// NewAlert returns a new alert (with defaults, if applicable)
|
||||
func NewAlert() *Alert {
|
||||
return &Alert{}
|
||||
}
|
||||
|
||||
// FetchAlert retrieves alert with passed cid.
|
||||
func (a *API) FetchAlert(cid CIDType) (*Alert, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid alert CID [none]")
|
||||
}
|
||||
|
||||
alertCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.AlertCIDRegex, alertCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid alert CID [%s]", alertCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(alertCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch alert, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
alert := &Alert{}
|
||||
if err := json.Unmarshal(result, alert); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return alert, nil
|
||||
}
|
||||
|
||||
// FetchAlerts retrieves all alerts available to the API Token.
|
||||
func (a *API) FetchAlerts() (*[]Alert, error) {
|
||||
result, err := a.Get(config.AlertPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var alerts []Alert
|
||||
if err := json.Unmarshal(result, &alerts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &alerts, nil
|
||||
}
|
||||
|
||||
// SearchAlerts returns alerts matching the specified search query
|
||||
// and/or filter. If nil is passed for both parameters all alerts
|
||||
// will be returned.
|
||||
func (a *API) SearchAlerts(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Alert, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchAlerts()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.AlertPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var alerts []Alert
|
||||
if err := json.Unmarshal(result, &alerts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &alerts, nil
|
||||
}
|
||||
223
vendor/github.com/circonus-labs/circonus-gometrics/api/annotation.go
generated
vendored
Normal file
223
vendor/github.com/circonus-labs/circonus-gometrics/api/annotation.go
generated
vendored
Normal file
@@ -0,0 +1,223 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Annotation API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/annotation
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// Annotation defines a annotation. See https://login.circonus.com/resources/api/calls/annotation for more information.
|
||||
type Annotation struct {
|
||||
Category string `json:"category"` // string
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
Created uint `json:"_created,omitempty"` // uint
|
||||
Description string `json:"description"` // string
|
||||
LastModified uint `json:"_last_modified,omitempty"` // uint
|
||||
LastModifiedBy string `json:"_last_modified_by,omitempty"` // string
|
||||
RelatedMetrics []string `json:"rel_metrics"` // [] len >= 0
|
||||
Start uint `json:"start"` // uint
|
||||
Stop uint `json:"stop"` // uint
|
||||
Title string `json:"title"` // string
|
||||
}
|
||||
|
||||
// NewAnnotation returns a new Annotation (with defaults, if applicable)
|
||||
func NewAnnotation() *Annotation {
|
||||
return &Annotation{}
|
||||
}
|
||||
|
||||
// FetchAnnotation retrieves annotation with passed cid.
|
||||
func (a *API) FetchAnnotation(cid CIDType) (*Annotation, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid annotation CID [none]")
|
||||
}
|
||||
|
||||
annotationCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid annotation CID [%s]", annotationCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(annotationCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch annotation, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
annotation := &Annotation{}
|
||||
if err := json.Unmarshal(result, annotation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return annotation, nil
|
||||
}
|
||||
|
||||
// FetchAnnotations retrieves all annotations available to the API Token.
|
||||
func (a *API) FetchAnnotations() (*[]Annotation, error) {
|
||||
result, err := a.Get(config.AnnotationPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var annotations []Annotation
|
||||
if err := json.Unmarshal(result, &annotations); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &annotations, nil
|
||||
}
|
||||
|
||||
// UpdateAnnotation updates passed annotation.
|
||||
func (a *API) UpdateAnnotation(cfg *Annotation) (*Annotation, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid annotation config [nil]")
|
||||
}
|
||||
|
||||
annotationCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid annotation CID [%s]", annotationCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update annotation, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(annotationCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
annotation := &Annotation{}
|
||||
if err := json.Unmarshal(result, annotation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return annotation, nil
|
||||
}
|
||||
|
||||
// CreateAnnotation creates a new annotation.
|
||||
func (a *API) CreateAnnotation(cfg *Annotation) (*Annotation, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid annotation config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create annotation, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.AnnotationPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
annotation := &Annotation{}
|
||||
if err := json.Unmarshal(result, annotation); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return annotation, nil
|
||||
}
|
||||
|
||||
// DeleteAnnotation deletes passed annotation.
|
||||
func (a *API) DeleteAnnotation(cfg *Annotation) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid annotation config [nil]")
|
||||
}
|
||||
|
||||
return a.DeleteAnnotationByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteAnnotationByCID deletes annotation with passed cid.
|
||||
func (a *API) DeleteAnnotationByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid annotation CID [none]")
|
||||
}
|
||||
|
||||
annotationCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.AnnotationCIDRegex, annotationCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid annotation CID [%s]", annotationCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(annotationCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchAnnotations returns annotations matching the specified
|
||||
// search query and/or filter. If nil is passed for both parameters
|
||||
// all annotations will be returned.
|
||||
func (a *API) SearchAnnotations(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Annotation, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchAnnotations()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.AnnotationPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var annotations []Annotation
|
||||
if err := json.Unmarshal(result, &annotations); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &annotations, nil
|
||||
}
|
||||
27
vendor/github.com/circonus-labs/circonus-gometrics/api/api.go
generated
vendored
27
vendor/github.com/circonus-labs/circonus-gometrics/api/api.go
generated
vendored
@@ -2,7 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package api provides methods for interacting with the Circonus API
|
||||
package api
|
||||
|
||||
import (
|
||||
@@ -35,20 +34,20 @@ type TokenKeyType string
|
||||
// TokenAppType - Circonus API Token app name
|
||||
type TokenAppType string
|
||||
|
||||
// IDType Circonus object id (numeric portion of cid)
|
||||
type IDType int
|
||||
|
||||
// CIDType Circonus object cid
|
||||
type CIDType string
|
||||
type CIDType *string
|
||||
|
||||
// IDType Circonus object id
|
||||
type IDType int
|
||||
|
||||
// URLType submission url type
|
||||
type URLType string
|
||||
|
||||
// SearchQueryType search query
|
||||
// SearchQueryType search query (see: https://login.circonus.com/resources/api#searching)
|
||||
type SearchQueryType string
|
||||
|
||||
// SearchFilterType search filter
|
||||
type SearchFilterType string
|
||||
// SearchFilterType search filter (see: https://login.circonus.com/resources/api#filtering)
|
||||
type SearchFilterType map[string][]string
|
||||
|
||||
// TagType search/select/custom tag(s) type
|
||||
type TagType []string
|
||||
@@ -71,8 +70,18 @@ type API struct {
|
||||
Log *log.Logger
|
||||
}
|
||||
|
||||
// NewAPI returns a new Circonus API
|
||||
// NewClient returns a new Circonus API (alias for New)
|
||||
func NewClient(ac *Config) (*API, error) {
|
||||
return New(ac)
|
||||
}
|
||||
|
||||
// NewAPI returns a new Circonus API (alias for New)
|
||||
func NewAPI(ac *Config) (*API, error) {
|
||||
return New(ac)
|
||||
}
|
||||
|
||||
// New returns a new Circonus API
|
||||
func New(ac *Config) (*API, error) {
|
||||
|
||||
if ac == nil {
|
||||
return nil, errors.New("Invalid API configuration (nil)")
|
||||
|
||||
151
vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go
generated
vendored
151
vendor/github.com/circonus-labs/circonus-gometrics/api/broker.go
generated
vendored
@@ -2,6 +2,9 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Broker API support - Fetch and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/broker
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
@@ -9,58 +12,59 @@ import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// BrokerDetail instance attributes
|
||||
// BrokerDetail defines instance attributes
|
||||
type BrokerDetail struct {
|
||||
CN string `json:"cn"`
|
||||
ExternalHost string `json:"external_host"`
|
||||
ExternalPort int `json:"external_port"`
|
||||
IP string `json:"ipaddress"`
|
||||
MinVer int `json:"minimum_version_required"`
|
||||
Modules []string `json:"modules"`
|
||||
Port int `json:"port"`
|
||||
Skew string `json:"skew"`
|
||||
Status string `json:"status"`
|
||||
Version int `json:"version"`
|
||||
CN string `json:"cn"` // string
|
||||
ExternalHost *string `json:"external_host"` // string or null
|
||||
ExternalPort uint16 `json:"external_port"` // uint16
|
||||
IP *string `json:"ipaddress"` // string or null
|
||||
MinVer uint `json:"minimum_version_required"` // uint
|
||||
Modules []string `json:"modules"` // [] len >= 0
|
||||
Port *uint16 `json:"port"` // uint16 or null
|
||||
Skew *string `json:"skew"` // BUG doc: floating point number, api object: string or null
|
||||
Status string `json:"status"` // string
|
||||
Version *uint `json:"version"` // uint or null
|
||||
}
|
||||
|
||||
// Broker definition
|
||||
// Broker defines a broker. See https://login.circonus.com/resources/api/calls/broker for more information.
|
||||
type Broker struct {
|
||||
CID string `json:"_cid"`
|
||||
Details []BrokerDetail `json:"_details"`
|
||||
Latitude string `json:"_latitude"`
|
||||
Longitude string `json:"_longitude"`
|
||||
Name string `json:"_name"`
|
||||
Tags []string `json:"_tags"`
|
||||
Type string `json:"_type"`
|
||||
CID string `json:"_cid"` // string
|
||||
Details []BrokerDetail `json:"_details"` // [] len >= 1
|
||||
Latitude *string `json:"_latitude"` // string or null
|
||||
Longitude *string `json:"_longitude"` // string or null
|
||||
Name string `json:"_name"` // string
|
||||
Tags []string `json:"_tags"` // [] len >= 0
|
||||
Type string `json:"_type"` // string
|
||||
}
|
||||
|
||||
const baseBrokerPath = "/broker"
|
||||
|
||||
// FetchBrokerByID fetch a broker configuration by [group]id
|
||||
func (a *API) FetchBrokerByID(id IDType) (*Broker, error) {
|
||||
cid := CIDType(fmt.Sprintf("%s/%d", baseBrokerPath, id))
|
||||
return a.FetchBrokerByCID(cid)
|
||||
}
|
||||
|
||||
// FetchBrokerByCID fetch a broker configuration by cid
|
||||
func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) {
|
||||
if matched, err := regexp.MatchString("^"+baseBrokerPath+"/[0-9]+$", string(cid)); err != nil {
|
||||
return nil, err
|
||||
} else if !matched {
|
||||
return nil, fmt.Errorf("Invalid broker CID %v", cid)
|
||||
// FetchBroker retrieves broker with passed cid.
|
||||
func (a *API) FetchBroker(cid CIDType) (*Broker, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid broker CID [none]")
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: string(cid),
|
||||
}
|
||||
brokerCID := string(*cid)
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
matched, err := regexp.MatchString(config.BrokerCIDRegex, brokerCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid broker CID [%s]", brokerCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(brokerCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch broker, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
response := new(Broker)
|
||||
if err := json.Unmarshal(result, &response); err != nil {
|
||||
@@ -71,32 +75,9 @@ func (a *API) FetchBrokerByCID(cid CIDType) (*Broker, error) {
|
||||
|
||||
}
|
||||
|
||||
// FetchBrokerListByTag return list of brokers with a specific tag
|
||||
func (a *API) FetchBrokerListByTag(searchTag TagType) ([]Broker, error) {
|
||||
query := SearchQueryType(fmt.Sprintf("f__tags_has=%s", strings.Replace(strings.Join(searchTag, ","), ",", "&f__tags_has=", -1)))
|
||||
return a.BrokerSearch(query)
|
||||
}
|
||||
|
||||
// BrokerSearch return a list of brokers matching a query/filter
|
||||
func (a *API) BrokerSearch(query SearchQueryType) ([]Broker, error) {
|
||||
queryURL := fmt.Sprintf("/broker?%s", string(query))
|
||||
|
||||
result, err := a.Get(queryURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var brokers []Broker
|
||||
if err := json.Unmarshal(result, &brokers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return brokers, nil
|
||||
}
|
||||
|
||||
// FetchBrokerList return list of all brokers available to the api token/app
|
||||
func (a *API) FetchBrokerList() ([]Broker, error) {
|
||||
result, err := a.Get("/broker")
|
||||
// FetchBrokers returns all brokers available to the API Token.
|
||||
func (a *API) FetchBrokers() (*[]Broker, error) {
|
||||
result, err := a.Get(config.BrokerPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -106,5 +87,45 @@ func (a *API) FetchBrokerList() ([]Broker, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return response, nil
|
||||
return &response, nil
|
||||
}
|
||||
|
||||
// SearchBrokers returns brokers matching the specified search
|
||||
// query and/or filter. If nil is passed for both parameters
|
||||
// all brokers will be returned.
|
||||
func (a *API) SearchBrokers(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Broker, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchBrokers()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.BrokerPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var brokers []Broker
|
||||
if err := json.Unmarshal(result, &brokers); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &brokers, nil
|
||||
}
|
||||
|
||||
168
vendor/github.com/circonus-labs/circonus-gometrics/api/check.go
generated
vendored
168
vendor/github.com/circonus-labs/circonus-gometrics/api/check.go
generated
vendored
@@ -2,52 +2,58 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Check API support - Fetch and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/check
|
||||
// Notes: checks do not directly support create, update, and delete - see check bundle.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// CheckDetails is an arbitrary json structure, we would only care about submission_url
|
||||
type CheckDetails struct {
|
||||
SubmissionURL string `json:"submission_url"`
|
||||
}
|
||||
// CheckDetails contains [undocumented] check type specific information
|
||||
type CheckDetails map[config.Key]string
|
||||
|
||||
// Check definition
|
||||
// Check defines a check. See https://login.circonus.com/resources/api/calls/check for more information.
|
||||
type Check struct {
|
||||
CID string `json:"_cid"`
|
||||
Active bool `json:"_active"`
|
||||
BrokerCID string `json:"_broker"`
|
||||
CheckBundleCID string `json:"_check_bundle"`
|
||||
CheckUUID string `json:"_check_uuid"`
|
||||
Details CheckDetails `json:"_details"`
|
||||
Active bool `json:"_active"` // bool
|
||||
BrokerCID string `json:"_broker"` // string
|
||||
CheckBundleCID string `json:"_check_bundle"` // string
|
||||
CheckUUID string `json:"_check_uuid"` // string
|
||||
CID string `json:"_cid"` // string
|
||||
Details CheckDetails `json:"_details"` // NOTE contents of details are check type specific, map len >= 0
|
||||
}
|
||||
|
||||
const baseCheckPath = "/check"
|
||||
|
||||
// FetchCheckByID fetch a check configuration by id
|
||||
func (a *API) FetchCheckByID(id IDType) (*Check, error) {
|
||||
cid := CIDType(fmt.Sprintf("%s/%d", baseCheckPath, int(id)))
|
||||
return a.FetchCheckByCID(cid)
|
||||
}
|
||||
|
||||
// FetchCheckByCID fetch a check configuration by cid
|
||||
func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) {
|
||||
if matched, err := regexp.MatchString("^"+baseCheckPath+"/[0-9]+$", string(cid)); err != nil {
|
||||
return nil, err
|
||||
} else if !matched {
|
||||
return nil, fmt.Errorf("Invalid check CID %v", cid)
|
||||
// FetchCheck retrieves check with passed cid.
|
||||
func (a *API) FetchCheck(cid CIDType) (*Check, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid check CID [none]")
|
||||
}
|
||||
|
||||
result, err := a.Get(string(cid))
|
||||
checkCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.CheckCIDRegex, checkCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid check CID [%s]", checkCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(checkCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch check, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
check := new(Check)
|
||||
if err := json.Unmarshal(result, check); err != nil {
|
||||
@@ -57,70 +63,46 @@ func (a *API) FetchCheckByCID(cid CIDType) (*Check, error) {
|
||||
return check, nil
|
||||
}
|
||||
|
||||
// FetchCheckBySubmissionURL fetch a check configuration by submission_url
|
||||
func (a *API) FetchCheckBySubmissionURL(submissionURL URLType) (*Check, error) {
|
||||
if string(submissionURL) == "" {
|
||||
return nil, errors.New("[ERROR] Invalid submission URL (blank)")
|
||||
}
|
||||
|
||||
u, err := url.Parse(string(submissionURL))
|
||||
// FetchChecks retrieves all checks available to the API Token.
|
||||
func (a *API) FetchChecks() (*[]Check, error) {
|
||||
result, err := a.Get(config.CheckPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// valid trap url: scheme://host[:port]/module/httptrap/UUID/secret
|
||||
|
||||
// does it smell like a valid trap url path
|
||||
if !strings.Contains(u.Path, "/module/httptrap/") {
|
||||
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL)
|
||||
}
|
||||
|
||||
// extract uuid
|
||||
pathParts := strings.Split(strings.Replace(u.Path, "/module/httptrap/", "", 1), "/")
|
||||
if len(pathParts) != 2 {
|
||||
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL)
|
||||
}
|
||||
uuid := pathParts[0]
|
||||
|
||||
filter := SearchFilterType(fmt.Sprintf("f__check_uuid=%s", uuid))
|
||||
|
||||
checks, err := a.CheckFilterSearch(filter)
|
||||
if err != nil {
|
||||
var checks []Check
|
||||
if err := json.Unmarshal(result, &checks); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(checks) == 0 {
|
||||
return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid)
|
||||
}
|
||||
|
||||
numActive := 0
|
||||
checkID := -1
|
||||
|
||||
for idx, check := range checks {
|
||||
if check.Active {
|
||||
numActive++
|
||||
checkID = idx
|
||||
}
|
||||
}
|
||||
|
||||
if numActive > 1 {
|
||||
return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid)
|
||||
}
|
||||
|
||||
return &checks[checkID], nil
|
||||
|
||||
return &checks, nil
|
||||
}
|
||||
|
||||
// CheckSearch returns a list of checks matching a search query
|
||||
func (a *API) CheckSearch(searchCriteria SearchQueryType) ([]Check, error) {
|
||||
reqURL := url.URL{
|
||||
Path: baseCheckPath,
|
||||
// SearchChecks returns checks matching the specified search query
|
||||
// and/or filter. If nil is passed for both parameters all checks
|
||||
// will be returned.
|
||||
func (a *API) SearchChecks(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Check, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if searchCriteria != "" {
|
||||
q := url.Values{}
|
||||
q.Set("search", string(searchCriteria))
|
||||
reqURL.RawQuery = q.Encode()
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchChecks()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.CheckPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
@@ -133,27 +115,5 @@ func (a *API) CheckSearch(searchCriteria SearchQueryType) ([]Check, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return checks, nil
|
||||
}
|
||||
|
||||
// CheckFilterSearch returns a list of checks matching a filter (filtering allows looking for
|
||||
// things within sub-elements e.g. details)
|
||||
func (a *API) CheckFilterSearch(filter SearchFilterType) ([]Check, error) {
|
||||
if filter == "" {
|
||||
return nil, errors.New("[ERROR] invalid filter supplied (blank)")
|
||||
}
|
||||
|
||||
filterURL := fmt.Sprintf("/check?%s", string(filter))
|
||||
|
||||
result, err := a.Get(filterURL)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var checks []Check
|
||||
if err := json.Unmarshal(result, &checks); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return checks, nil
|
||||
return &checks, nil
|
||||
}
|
||||
|
||||
346
vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go
generated
vendored
346
vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle.go
generated
vendored
@@ -2,6 +2,9 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Check bundle API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/check_bundle
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
@@ -9,75 +12,134 @@ import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// CheckBundleConfig configuration specific to check type
|
||||
type CheckBundleConfig struct {
|
||||
AsyncMetrics bool `json:"async_metrics"`
|
||||
Secret string `json:"secret"`
|
||||
SubmissionURL string `json:"submission_url"`
|
||||
ReverseSecret string `json:"reverse:secret_key"`
|
||||
HTTPVersion string `json:"http_version,omitempty"`
|
||||
Method string `json:"method,omitempty"`
|
||||
Payload string `json:"payload,omitempty"`
|
||||
Port string `json:"port,omitempty"`
|
||||
ReadLimit string `json:"read_limit,omitempty"`
|
||||
URL string `json:"url,omitempty"`
|
||||
}
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// CheckBundleMetric individual metric configuration
|
||||
type CheckBundleMetric struct {
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type"`
|
||||
Units string `json:"units"`
|
||||
Status string `json:"status"`
|
||||
Tags []string `json:"tags"`
|
||||
Name string `json:"name"` // string
|
||||
Result *string `json:"result,omitempty"` // string or null, NOTE not settable - return/information value only
|
||||
Status string `json:"status,omitempty"` // string
|
||||
Tags []string `json:"tags"` // [] len >= 0
|
||||
Type string `json:"type"` // string
|
||||
Units *string `json:"units,omitempty"` // string or null
|
||||
|
||||
}
|
||||
|
||||
// CheckBundle definition
|
||||
// CheckBundleConfig contains the check type specific configuration settings
|
||||
// as k/v pairs (see https://login.circonus.com/resources/api/calls/check_bundle
|
||||
// for the specific settings available for each distinct check type)
|
||||
type CheckBundleConfig map[config.Key]string
|
||||
|
||||
// CheckBundle defines a check bundle. See https://login.circonus.com/resources/api/calls/check_bundle for more information.
|
||||
type CheckBundle struct {
|
||||
CheckUUIDs []string `json:"_check_uuids,omitempty"`
|
||||
Checks []string `json:"_checks,omitempty"`
|
||||
CID string `json:"_cid,omitempty"`
|
||||
Created int `json:"_created,omitempty"`
|
||||
LastModified int `json:"_last_modified,omitempty"`
|
||||
LastModifedBy string `json:"_last_modifed_by,omitempty"`
|
||||
ReverseConnectURLs []string `json:"_reverse_connection_urls"`
|
||||
Brokers []string `json:"brokers"`
|
||||
Config CheckBundleConfig `json:"config"`
|
||||
DisplayName string `json:"display_name"`
|
||||
Metrics []CheckBundleMetric `json:"metrics"`
|
||||
MetricLimit int `json:"metric_limit,omitempty"`
|
||||
Notes string `json:"notes,omitempty"`
|
||||
Period int `json:"period,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
Tags []string `json:"tags,omitempty"`
|
||||
Target string `json:"target"`
|
||||
Timeout int `json:"timeout,omitempty"`
|
||||
Type string `json:"type"`
|
||||
Brokers []string `json:"brokers"` // [] len >= 0
|
||||
Checks []string `json:"_checks,omitempty"` // [] len >= 0
|
||||
CheckUUIDs []string `json:"_check_uuids,omitempty"` // [] len >= 0
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
Config CheckBundleConfig `json:"config,omitempty"` // NOTE contents of config are check type specific, map len >= 0
|
||||
Created uint `json:"_created,omitempty"` // uint
|
||||
DisplayName string `json:"display_name"` // string
|
||||
LastModifedBy string `json:"_last_modifed_by,omitempty"` // string
|
||||
LastModified uint `json:"_last_modified,omitempty"` // uint
|
||||
MetricLimit int `json:"metric_limit,omitempty"` // int
|
||||
Metrics []CheckBundleMetric `json:"metrics"` // [] >= 0
|
||||
Notes *string `json:"notes,omitempty"` // string or null
|
||||
Period uint `json:"period,omitempty"` // uint
|
||||
ReverseConnectURLs []string `json:"_reverse_connection_urls,omitempty"` // [] len >= 0
|
||||
Status string `json:"status,omitempty"` // string
|
||||
Tags []string `json:"tags,omitempty"` // [] len >= 0
|
||||
Target string `json:"target"` // string
|
||||
Timeout float32 `json:"timeout,omitempty"` // float32
|
||||
Type string `json:"type"` // string
|
||||
}
|
||||
|
||||
const baseCheckBundlePath = "/check_bundle"
|
||||
|
||||
// FetchCheckBundleByID fetch a check bundle configuration by id
|
||||
func (a *API) FetchCheckBundleByID(id IDType) (*CheckBundle, error) {
|
||||
cid := CIDType(fmt.Sprintf("%s/%d", baseCheckBundlePath, id))
|
||||
return a.FetchCheckBundleByCID(cid)
|
||||
// NewCheckBundle returns new CheckBundle (with defaults, if applicable)
|
||||
func NewCheckBundle() *CheckBundle {
|
||||
return &CheckBundle{
|
||||
Config: make(CheckBundleConfig, config.DefaultConfigOptionsSize),
|
||||
MetricLimit: config.DefaultCheckBundleMetricLimit,
|
||||
Period: config.DefaultCheckBundlePeriod,
|
||||
Timeout: config.DefaultCheckBundleTimeout,
|
||||
Status: config.DefaultCheckBundleStatus,
|
||||
}
|
||||
}
|
||||
|
||||
// FetchCheckBundleByCID fetch a check bundle configuration by id
|
||||
func (a *API) FetchCheckBundleByCID(cid CIDType) (*CheckBundle, error) {
|
||||
if matched, err := regexp.MatchString("^"+baseCheckBundlePath+"/[0-9]+$", string(cid)); err != nil {
|
||||
// FetchCheckBundle retrieves check bundle with passed cid.
|
||||
func (a *API) FetchCheckBundle(cid CIDType) (*CheckBundle, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid check bundle CID [none]")
|
||||
}
|
||||
|
||||
bundleCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if !matched {
|
||||
return nil, fmt.Errorf("Invalid check bundle CID %v", cid)
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid check bundle CID [%v]", bundleCID)
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: string(cid),
|
||||
result, err := a.Get(bundleCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch check bundle, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
checkBundle := &CheckBundle{}
|
||||
if err := json.Unmarshal(result, checkBundle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return checkBundle, nil
|
||||
}
|
||||
|
||||
// FetchCheckBundles retrieves all check bundles available to the API Token.
|
||||
func (a *API) FetchCheckBundles() (*[]CheckBundle, error) {
|
||||
result, err := a.Get(config.CheckBundlePrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var checkBundles []CheckBundle
|
||||
if err := json.Unmarshal(result, &checkBundles); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &checkBundles, nil
|
||||
}
|
||||
|
||||
// UpdateCheckBundle updates passed check bundle.
|
||||
func (a *API) UpdateCheckBundle(cfg *CheckBundle) (*CheckBundle, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid check bundle config [nil]")
|
||||
}
|
||||
|
||||
bundleCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid check bundle CID [%s]", bundleCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update check bundle, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(bundleCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -90,17 +152,93 @@ func (a *API) FetchCheckBundleByCID(cid CIDType) (*CheckBundle, error) {
|
||||
return checkBundle, nil
|
||||
}
|
||||
|
||||
// CheckBundleSearch returns list of check bundles matching a search query
|
||||
// - a search query (see: https://login.circonus.com/resources/api#searching)
|
||||
func (a *API) CheckBundleSearch(searchCriteria SearchQueryType) ([]CheckBundle, error) {
|
||||
reqURL := url.URL{
|
||||
Path: baseCheckBundlePath,
|
||||
// CreateCheckBundle creates a new check bundle (check).
|
||||
func (a *API) CreateCheckBundle(cfg *CheckBundle) (*CheckBundle, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid check bundle config [nil]")
|
||||
}
|
||||
|
||||
if searchCriteria != "" {
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create check bundle, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.CheckBundlePrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
checkBundle := &CheckBundle{}
|
||||
if err := json.Unmarshal(result, checkBundle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return checkBundle, nil
|
||||
}
|
||||
|
||||
// DeleteCheckBundle deletes passed check bundle.
|
||||
func (a *API) DeleteCheckBundle(cfg *CheckBundle) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid check bundle config [nil]")
|
||||
}
|
||||
return a.DeleteCheckBundleByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteCheckBundleByCID deletes check bundle with passed cid.
|
||||
func (a *API) DeleteCheckBundleByCID(cid CIDType) (bool, error) {
|
||||
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid check bundle CID [none]")
|
||||
}
|
||||
|
||||
bundleCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.CheckBundleCIDRegex, bundleCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid check bundle CID [%v]", bundleCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(bundleCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchCheckBundles returns check bundles matching the specified
|
||||
// search query and/or filter. If nil is passed for both parameters
|
||||
// all check bundles will be returned.
|
||||
func (a *API) SearchCheckBundles(searchCriteria *SearchQueryType, filterCriteria *map[string][]string) (*[]CheckBundle, error) {
|
||||
|
||||
q := url.Values{}
|
||||
q.Set("search", string(searchCriteria))
|
||||
reqURL.RawQuery = q.Encode()
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchCheckBundles()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.CheckBundlePrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
resp, err := a.Get(reqURL.String())
|
||||
@@ -113,89 +251,5 @@ func (a *API) CheckBundleSearch(searchCriteria SearchQueryType) ([]CheckBundle,
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// CheckBundleFilterSearch returns list of check bundles matching a search query and filter
|
||||
// - a search query (see: https://login.circonus.com/resources/api#searching)
|
||||
// - a filter (see: https://login.circonus.com/resources/api#filtering)
|
||||
func (a *API) CheckBundleFilterSearch(searchCriteria SearchQueryType, filterCriteria map[string]string) ([]CheckBundle, error) {
|
||||
reqURL := url.URL{
|
||||
Path: baseCheckBundlePath,
|
||||
}
|
||||
|
||||
if searchCriteria != "" {
|
||||
q := url.Values{}
|
||||
q.Set("search", string(searchCriteria))
|
||||
for field, val := range filterCriteria {
|
||||
q.Set(field, val)
|
||||
}
|
||||
reqURL.RawQuery = q.Encode()
|
||||
}
|
||||
|
||||
resp, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var results []CheckBundle
|
||||
if err := json.Unmarshal(resp, &results); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return results, nil
|
||||
}
|
||||
|
||||
// CreateCheckBundle create a new check bundle (check)
|
||||
func (a *API) CreateCheckBundle(config *CheckBundle) (*CheckBundle, error) {
|
||||
reqURL := url.URL{
|
||||
Path: baseCheckBundlePath,
|
||||
}
|
||||
|
||||
cfg, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := a.Post(reqURL.String(), cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
checkBundle := &CheckBundle{}
|
||||
if err := json.Unmarshal(resp, checkBundle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return checkBundle, nil
|
||||
}
|
||||
|
||||
// UpdateCheckBundle updates a check bundle configuration
|
||||
func (a *API) UpdateCheckBundle(config *CheckBundle) (*CheckBundle, error) {
|
||||
if matched, err := regexp.MatchString("^"+baseCheckBundlePath+"/[0-9]+$", string(config.CID)); err != nil {
|
||||
return nil, err
|
||||
} else if !matched {
|
||||
return nil, fmt.Errorf("Invalid check bundle CID %v", config.CID)
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.CID,
|
||||
}
|
||||
|
||||
cfg, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := a.Put(reqURL.String(), cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
checkBundle := &CheckBundle{}
|
||||
if err := json.Unmarshal(resp, checkBundle); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return checkBundle, nil
|
||||
return &results, nil
|
||||
}
|
||||
|
||||
95
vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle_metrics.go
generated
vendored
Normal file
95
vendor/github.com/circonus-labs/circonus-gometrics/api/check_bundle_metrics.go
generated
vendored
Normal file
@@ -0,0 +1,95 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// CheckBundleMetrics API support - Fetch, Create*, Update, and Delete**
|
||||
// See: https://login.circonus.com/resources/api/calls/check_bundle_metrics
|
||||
// * : create metrics by adding to array with a status of 'active'
|
||||
// ** : delete (distable collection of) metrics by changing status from 'active' to 'available'
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// CheckBundleMetrics defines metrics for a specific check bundle. See https://login.circonus.com/resources/api/calls/check_bundle_metrics for more information.
|
||||
type CheckBundleMetrics struct {
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
Metrics []CheckBundleMetric `json:"metrics"` // See check_bundle.go for CheckBundleMetric definition
|
||||
}
|
||||
|
||||
// FetchCheckBundleMetrics retrieves metrics for the check bundle with passed cid.
|
||||
func (a *API) FetchCheckBundleMetrics(cid CIDType) (*CheckBundleMetrics, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid check bundle metrics CID [none]")
|
||||
}
|
||||
|
||||
metricsCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.CheckBundleMetricsCIDRegex, metricsCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid check bundle metrics CID [%s]", metricsCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(metricsCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch check bundle metrics, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
metrics := &CheckBundleMetrics{}
|
||||
if err := json.Unmarshal(result, metrics); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
|
||||
// UpdateCheckBundleMetrics updates passed metrics.
|
||||
func (a *API) UpdateCheckBundleMetrics(cfg *CheckBundleMetrics) (*CheckBundleMetrics, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid check bundle metrics config [nil]")
|
||||
}
|
||||
|
||||
metricsCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.CheckBundleMetricsCIDRegex, metricsCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid check bundle metrics CID [%s]", metricsCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update check bundle metrics, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(metricsCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metrics := &CheckBundleMetrics{}
|
||||
if err := json.Unmarshal(result, metrics); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return metrics, nil
|
||||
}
|
||||
538
vendor/github.com/circonus-labs/circonus-gometrics/api/config/consts.go
generated
vendored
Normal file
538
vendor/github.com/circonus-labs/circonus-gometrics/api/config/consts.go
generated
vendored
Normal file
@@ -0,0 +1,538 @@
|
||||
package config

// Key for CheckBundleConfig options and CheckDetails info
type Key string

// Constants per type as defined in
// https://login.circonus.com/resources/api/calls/check_bundle
const (
	//
	// default settings for api.NewCheckBundle()
	//
	DefaultCheckBundleMetricLimit = -1 // unlimited
	DefaultCheckBundleStatus      = "active"
	DefaultCheckBundlePeriod      = 60 // seconds between checks
	DefaultCheckBundleTimeout     = 10
	DefaultConfigOptionsSize      = 20

	//
	// common (apply to more than one check type)
	//
	AsyncMetrics       = Key("async_metrics")
	AuthMethod         = Key("auth_method")
	AuthPassword       = Key("auth_password")
	AuthUser           = Key("auth_user")
	BaseURL            = Key("base_url")
	CAChain            = Key("ca_chain")
	CertFile           = Key("certificate_file")
	Ciphers            = Key("ciphers")
	Command            = Key("command")
	DSN                = Key("dsn")
	HeaderPrefix       = Key("header_")
	HTTPVersion        = Key("http_version")
	KeyFile            = Key("key_file")
	Method             = Key("method")
	Password           = Key("password")
	Payload            = Key("payload")
	Port               = Key("port")
	Query              = Key("query")
	ReadLimit          = Key("read_limit")
	Secret             = Key("secret")
	SQL                = Key("sql")
	URI                = Key("uri")
	URL                = Key("url")
	Username           = Key("username")
	UseSSL             = Key("use_ssl")
	User               = Key("user")
	SASLAuthentication = Key("sasl_authentication")
	SASLUser           = Key("sasl_user")
	SecurityLevel      = Key("security_level")
	Version            = Key("version")
	AppendColumnName   = Key("append_column_name")
	Database           = Key("database")
	JDBCPrefix         = Key("jdbc_")

	//
	// CAQL check
	//
	// Common items:
	// Query

	//
	// Circonus Windows Agent
	//
	// Common items:
	// AuthPassword
	// AuthUser
	// Port
	// URL
	Calculated = Key("calculated")
	Category   = Key("category")

	//
	// Cloudwatch
	//
	// Notes:
	// DimPrefix is special because the actual key is dynamic and matches: `dim_(.+)`
	// Common items:
	// URL
	// Version
	APIKey            = Key("api_key")
	APISecret         = Key("api_secret")
	CloudwatchMetrics = Key("cloudwatch_metrics")
	DimPrefix         = Key("dim_")
	Granularity       = Key("granularity")
	Namespace         = Key("namespace")
	Statistics        = Key("statistics")

	//
	// Collectd
	//
	// Common items:
	// AsyncMetrics
	// Username
	// Secret
	// SecurityLevel

	//
	// Composite
	//
	CompositeMetricName = Key("composite_metric_name")
	Formula             = Key("formula")

	//
	// DHCP
	//
	HardwareAddress = Key("hardware_addr")
	HostIP          = Key("host_ip")
	RequestType     = Key("request_type")
	SendPort        = Key("send_port")

	//
	// DNS
	//
	// Common items:
	// Query
	CType      = Key("ctype")
	Nameserver = Key("nameserver")
	RType      = Key("rtype")

	//
	// EC Console
	//
	// Common items:
	// Command
	// Port
	// SASLAuthentication
	// SASLUser
	Objects = Key("objects")
	XPath   = Key("xpath")

	//
	// Elastic Search
	//
	// Common items:
	// Port
	// URL

	//
	// Ganglia
	//
	// Common items:
	// AsyncMetrics

	//
	// Google Analytics
	//
	// Common items:
	// Password
	// Username
	OAuthToken       = Key("oauth_token")
	OAuthTokenSecret = Key("oauth_token_secret")
	OAuthVersion     = Key("oauth_version")
	TableID          = Key("table_id")
	UseOAuth         = Key("use_oauth")

	//
	// HA Proxy
	//
	// Common items:
	// AuthPassword
	// AuthUser
	// Port
	// UseSSL
	Host   = Key("host")
	Select = Key("select")

	//
	// HTTP
	//
	// Notes:
	// HeaderPrefix is special because the actual key is dynamic and matches: `header_(\S+)`
	// Common items:
	// AuthMethod
	// AuthPassword
	// AuthUser
	// CAChain
	// CertFile
	// Ciphers
	// KeyFile
	// URL
	// HeaderPrefix
	// HTTPVersion
	// Method
	// Payload
	// ReadLimit
	Body      = Key("body")
	Code      = Key("code")
	Extract   = Key("extract")
	Redirects = Key("redirects")

	//
	// HTTPTRAP
	//
	// Common items:
	// AsyncMetrics
	// Secret

	//
	// IMAP
	//
	// Common items:
	// AuthPassword
	// AuthUser
	// CAChain
	// CertFile
	// Ciphers
	// KeyFile
	// Port
	// UseSSL
	Fetch      = Key("fetch")
	Folder     = Key("folder")
	HeaderHost = Key("header_Host")
	Search     = Key("search")

	//
	// JMX
	//
	// Common items:
	// Password
	// Port
	// URI
	// Username
	MbeanDomains = Key("mbean_domains")

	//
	// JSON
	//
	// Common items:
	// AuthMethod
	// AuthPassword
	// AuthUser
	// CAChain
	// CertFile
	// Ciphers
	// HeaderPrefix
	// HTTPVersion
	// KeyFile
	// Method
	// Payload
	// Port
	// ReadLimit
	// URL

	//
	// Keynote
	//
	// Notes:
	// SlotAliasPrefix is special because the actual key is dynamic and matches: `slot_alias_(\d+)`
	// Common items:
	// APIKey
	// BaseURL
	PageComponent   = Key("pagecomponent")
	SlotAliasPrefix = Key("slot_alias_")
	SlotIDList      = Key("slot_id_list")
	TransPageList   = Key("transpagelist")

	//
	// Keynote Pulse
	//
	// Common items:
	// BaseURL
	// Password
	// User
	AgreementID = Key("agreement_id")

	//
	// LDAP
	//
	// Common items:
	// Password
	// Port
	AuthType          = Key("authtype")
	DN                = Key("dn")
	SecurityPrincipal = Key("security_principal")

	//
	// Memcached
	//
	// Common items:
	// Port

	//
	// MongoDB
	//
	// Common items:
	// Command
	// Password
	// Port
	// Username
	DBName = Key("dbname")

	//
	// Munin
	//
	// Note: no configuration options

	//
	// MySQL
	//
	// Common items:
	// DSN
	// SQL

	//
	// Newrelic rpm
	//
	// Common items:
	// APIKey
	AccountID     = Key("acct_id")
	ApplicationID = Key("application_id")
	LicenseKey    = Key("license_key")

	//
	// Nginx
	//
	// Common items:
	// CAChain
	// CertFile
	// Ciphers
	// KeyFile
	// URL

	//
	// NRPE
	//
	// Common items:
	// Command
	// Port
	// UseSSL
	AppendUnits = Key("append_uom")

	//
	// NTP
	//
	// Common items:
	// Port
	Control = Key("control")

	//
	// Oracle
	//
	// Notes:
	// JDBCPrefix is special because the actual key is dynamic and matches: `jdbc_(\S+)`
	// Common items:
	// AppendColumnName
	// Database
	// JDBCPrefix
	// Password
	// Port
	// SQL
	// User

	//
	// Ping ICMP
	//
	AvailNeeded = Key("avail_needed")
	Count       = Key("count")
	Interval    = Key("interval")

	//
	// PostgreSQL
	//
	// Common items:
	// DSN
	// SQL

	//
	// Redis
	//
	// Common items:
	// Command
	// Password
	// Port
	DBIndex = Key("dbindex")

	//
	// Resmon
	//
	// Notes:
	// HeaderPrefix is special because the actual key is dynamic and matches: `header_(\S+)`
	// Common items:
	// AuthMethod
	// AuthPassword
	// AuthUser
	// CAChain
	// CertFile
	// Ciphers
	// HeaderPrefix
	// HTTPVersion
	// KeyFile
	// Method
	// Payload
	// Port
	// ReadLimit
	// URL

	//
	// SMTP
	//
	// Common items:
	// Payload
	// Port
	// SASLAuthentication
	// SASLUser
	EHLO         = Key("ehlo")
	From         = Key("from")
	SASLAuthID   = Key("sasl_auth_id")
	SASLPassword = Key("sasl_password")
	StartTLS     = Key("starttls")
	To           = Key("to")

	//
	// SNMP
	//
	// Notes:
	// OIDPrefix is special because the actual key is dynamic and matches: `oid_(.+)`
	// TypePrefix is special because the actual key is dynamic and matches: `type_(.+)`
	// Common items:
	// Port
	// SecurityLevel
	// Version
	AuthPassphrase    = Key("auth_passphrase")
	AuthProtocol      = Key("auth_protocol")
	Community         = Key("community")
	ContextEngine     = Key("context_engine")
	ContextName       = Key("context_name")
	OIDPrefix         = Key("oid_")
	PrivacyPassphrase = Key("privacy_passphrase")
	PrivacyProtocol   = Key("privacy_protocol")
	SecurityEngine    = Key("security_engine")
	SecurityName      = Key("security_name")
	SeparateQueries   = Key("separate_queries")
	TypePrefix        = Key("type_")

	//
	// SQLServer
	//
	// Notes:
	// JDBCPrefix is special because the actual key is dynamic and matches: `jdbc_(\S+)`
	// Common items:
	// AppendColumnName
	// Database
	// JDBCPrefix
	// Password
	// Port
	// SQL
	// User

	//
	// SSH v2
	//
	// Common items:
	// Port
	MethodCompCS      = Key("method_comp_cs")
	MethodCompSC      = Key("method_comp_sc")
	MethodCryptCS     = Key("method_crypt_cs")
	MethodCryptSC     = Key("method_crypt_sc")
	MethodHostKey     = Key("method_hostkey")
	MethodKeyExchange = Key("method_kex")
	MethodMacCS       = Key("method_mac_cs")
	MethodMacSC       = Key("method_mac_sc")

	//
	// StatsD
	//
	// Note: no configuration options

	//
	// TCP
	//
	// Common items:
	// CAChain
	// CertFile
	// Ciphers
	// KeyFile
	// Port
	// UseSSL
	BannerMatch = Key("banner_match")

	//
	// Varnish
	//
	// Note: no configuration options

	//
	// reserved - config option(s) can't actually be set - here for r/o access
	//
	ReverseSecretKey = Key("reverse:secret_key")
	SubmissionURL    = Key("submission_url")

	//
	// Endpoint prefix & cid regex
	//
	DefaultCIDRegex            = "[0-9]+"
	DefaultUUIDRegex           = "[[:xdigit:]]{8}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{4}-[[:xdigit:]]{12}"
	AccountPrefix              = "/account"
	AccountCIDRegex            = "^(" + AccountPrefix + "/(" + DefaultCIDRegex + "|current))$"
	AcknowledgementPrefix      = "/acknowledgement"
	AcknowledgementCIDRegex    = "^(" + AcknowledgementPrefix + "/(" + DefaultCIDRegex + "))$"
	AlertPrefix                = "/alert"
	AlertCIDRegex              = "^(" + AlertPrefix + "/(" + DefaultCIDRegex + "))$"
	AnnotationPrefix           = "/annotation"
	AnnotationCIDRegex         = "^(" + AnnotationPrefix + "/(" + DefaultCIDRegex + "))$"
	BrokerPrefix               = "/broker"
	BrokerCIDRegex             = "^(" + BrokerPrefix + "/(" + DefaultCIDRegex + "))$"
	CheckBundleMetricsPrefix   = "/check_bundle_metrics"
	CheckBundleMetricsCIDRegex = "^(" + CheckBundleMetricsPrefix + "/(" + DefaultCIDRegex + "))$"
	CheckBundlePrefix          = "/check_bundle"
	CheckBundleCIDRegex        = "^(" + CheckBundlePrefix + "/(" + DefaultCIDRegex + "))$"
	CheckPrefix                = "/check"
	CheckCIDRegex              = "^(" + CheckPrefix + "/(" + DefaultCIDRegex + "))$"
	ContactGroupPrefix         = "/contact_group"
	ContactGroupCIDRegex       = "^(" + ContactGroupPrefix + "/(" + DefaultCIDRegex + "))$"
	DashboardPrefix            = "/dashboard"
	DashboardCIDRegex          = "^(" + DashboardPrefix + "/(" + DefaultCIDRegex + "))$"
	GraphPrefix                = "/graph"
	GraphCIDRegex              = "^(" + GraphPrefix + "/(" + DefaultUUIDRegex + "))$"
	MaintenancePrefix          = "/maintenance"
	MaintenanceCIDRegex        = "^(" + MaintenancePrefix + "/(" + DefaultCIDRegex + "))$"
	MetricClusterPrefix        = "/metric_cluster"
	MetricClusterCIDRegex      = "^(" + MetricClusterPrefix + "/(" + DefaultCIDRegex + "))$"
	MetricPrefix               = "/metric"
	MetricCIDRegex             = "^(" + MetricPrefix + "/((" + DefaultCIDRegex + ")_([^[:space:]]+)))$"
	OutlierReportPrefix        = "/outlier_report"
	OutlierReportCIDRegex      = "^(" + OutlierReportPrefix + "/(" + DefaultCIDRegex + "))$"
	ProvisionBrokerPrefix      = "/provision_broker"
	ProvisionBrokerCIDRegex    = "^(" + ProvisionBrokerPrefix + "/([a-z0-9]+-[a-z0-9]+))$"
	RuleSetGroupPrefix         = "/rule_set_group"
	RuleSetGroupCIDRegex       = "^(" + RuleSetGroupPrefix + "/(" + DefaultCIDRegex + "))$"
	RuleSetPrefix              = "/rule_set"
	RuleSetCIDRegex            = "^(" + RuleSetPrefix + "/((" + DefaultCIDRegex + ")_([^[:space:]]+)))$"
	UserPrefix                 = "/user"
	UserCIDRegex               = "^(" + UserPrefix + "/(" + DefaultCIDRegex + "|current))$"
	WorksheetPrefix            = "/worksheet"
	WorksheetCIDRegex          = "^(" + WorksheetPrefix + "/(" + DefaultUUIDRegex + "))$"
	// contact group severity levels
	NumSeverityLevels = 5
)
|
||||
263
vendor/github.com/circonus-labs/circonus-gometrics/api/contact_group.go
generated
vendored
Normal file
263
vendor/github.com/circonus-labs/circonus-gometrics/api/contact_group.go
generated
vendored
Normal file
@@ -0,0 +1,263 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Contact Group API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/contact_group
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// ContactGroupAlertFormats define alert formats. Each field is a pointer so
// that a JSON null can be distinguished from an empty string.
type ContactGroupAlertFormats struct {
	LongMessage  *string `json:"long_message"`  // string or null
	LongSubject  *string `json:"long_subject"`  // string or null
	LongSummary  *string `json:"long_summary"`  // string or null
	ShortMessage *string `json:"short_message"` // string or null
	ShortSummary *string `json:"short_summary"` // string or null
}

// ContactGroupContactsExternal external contacts
type ContactGroupContactsExternal struct {
	Info   string `json:"contact_info"` // string
	Method string `json:"method"`       // string
}

// ContactGroupContactsUser user contacts
type ContactGroupContactsUser struct {
	Info    string `json:"_contact_info,omitempty"` // string
	Method  string `json:"method"`                  // string
	UserCID string `json:"user"`                    // string
}

// ContactGroupContacts list of contacts
type ContactGroupContacts struct {
	External []ContactGroupContactsExternal `json:"external"` // [] len >= 0
	Users    []ContactGroupContactsUser     `json:"users"`    // [] len >= 0
}

// ContactGroupEscalation defines escalations for severity levels
type ContactGroupEscalation struct {
	After           uint   `json:"after"`         // uint
	ContactGroupCID string `json:"contact_group"` // string
}

// ContactGroup defines a contact group. See https://login.circonus.com/resources/api/calls/contact_group for more information.
type ContactGroup struct {
	AggregationWindow uint                      `json:"aggregation_window,omitempty"` // uint
	AlertFormats      ContactGroupAlertFormats  `json:"alert_formats,omitempty"`      // ContactGroupAlertFormats
	CID               string                    `json:"_cid,omitempty"`               // string
	Contacts          ContactGroupContacts      `json:"contacts,omitempty"`           // ContactGroupContacts
	Escalations       []*ContactGroupEscalation `json:"escalations,omitempty"`        // [] len == 5, elements: ContactGroupEscalation or null
	LastModified      uint                      `json:"_last_modified,omitempty"`     // uint
	LastModifiedBy    string                    `json:"_last_modified_by,omitempty"`  // string
	Name              string                    `json:"name,omitempty"`               // string
	Reminders         []uint                    `json:"reminders,omitempty"`          // [] len == 5
	Tags              []string                  `json:"tags,omitempty"`               // [] len >= 0
}
|
||||
|
||||
// NewContactGroup returns a ContactGroup (with defaults, if applicable)
|
||||
func NewContactGroup() *ContactGroup {
|
||||
return &ContactGroup{
|
||||
Escalations: make([]*ContactGroupEscalation, config.NumSeverityLevels),
|
||||
Reminders: make([]uint, config.NumSeverityLevels),
|
||||
Contacts: ContactGroupContacts{
|
||||
External: []ContactGroupContactsExternal{},
|
||||
Users: []ContactGroupContactsUser{},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// FetchContactGroup retrieves contact group with passed cid.
|
||||
func (a *API) FetchContactGroup(cid CIDType) (*ContactGroup, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid contact group CID [none]")
|
||||
}
|
||||
|
||||
groupCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid contact group CID [%s]", groupCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(groupCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch contact group, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
group := new(ContactGroup)
|
||||
if err := json.Unmarshal(result, group); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return group, nil
|
||||
}
|
||||
|
||||
// FetchContactGroups retrieves all contact groups available to the API Token.
|
||||
func (a *API) FetchContactGroups() (*[]ContactGroup, error) {
|
||||
result, err := a.Get(config.ContactGroupPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var groups []ContactGroup
|
||||
if err := json.Unmarshal(result, &groups); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &groups, nil
|
||||
}
|
||||
|
||||
// UpdateContactGroup updates passed contact group.
|
||||
func (a *API) UpdateContactGroup(cfg *ContactGroup) (*ContactGroup, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid contact group config [nil]")
|
||||
}
|
||||
|
||||
groupCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid contact group CID [%s]", groupCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update contact group, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(groupCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
group := &ContactGroup{}
|
||||
if err := json.Unmarshal(result, group); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return group, nil
|
||||
}
|
||||
|
||||
// CreateContactGroup creates a new contact group.
|
||||
func (a *API) CreateContactGroup(cfg *ContactGroup) (*ContactGroup, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid contact group config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create contact group, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.ContactGroupPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
group := &ContactGroup{}
|
||||
if err := json.Unmarshal(result, group); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return group, nil
|
||||
}
|
||||
|
||||
// DeleteContactGroup deletes passed contact group.
|
||||
func (a *API) DeleteContactGroup(cfg *ContactGroup) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid contact group config [nil]")
|
||||
}
|
||||
return a.DeleteContactGroupByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteContactGroupByCID deletes contact group with passed cid.
|
||||
func (a *API) DeleteContactGroupByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid contact group CID [none]")
|
||||
}
|
||||
|
||||
groupCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.ContactGroupCIDRegex, groupCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid contact group CID [%s]", groupCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(groupCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchContactGroups returns contact groups matching the specified
|
||||
// search query and/or filter. If nil is passed for both parameters
|
||||
// all contact groups will be returned.
|
||||
func (a *API) SearchContactGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]ContactGroup, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchContactGroups()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.ContactGroupPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var groups []ContactGroup
|
||||
if err := json.Unmarshal(result, &groups); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &groups, nil
|
||||
}
|
||||
390
vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard-example.json
generated
vendored
Normal file
390
vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard-example.json
generated
vendored
Normal file
@@ -0,0 +1,390 @@
|
||||
{
|
||||
"_active": true,
|
||||
"_cid": "/dashboard/1234",
|
||||
"_created": 1483193930,
|
||||
"_created_by": "/user/1234",
|
||||
"_dashboard_uuid": "01234567-89ab-cdef-0123-456789abcdef",
|
||||
"_last_modified": 1483450351,
|
||||
"account_default": false,
|
||||
"grid_layout": {
|
||||
"height": 4,
|
||||
"width": 4
|
||||
},
|
||||
"options": {
|
||||
"access_configs": [
|
||||
],
|
||||
"fullscreen_hide_title": false,
|
||||
"hide_grid": false,
|
||||
"linkages": [
|
||||
],
|
||||
"scale_text": true,
|
||||
"text_size": 16
|
||||
},
|
||||
"shared": false,
|
||||
"title": "foo bar baz",
|
||||
"widgets": [
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Cluster",
|
||||
"origin": "d0",
|
||||
"settings": {
|
||||
"account_id": "1234",
|
||||
"algorithm": "cor",
|
||||
"cluster_id": 1234,
|
||||
"cluster_name": "test",
|
||||
"layout": "compact",
|
||||
"size": "medium",
|
||||
"threshold": 0.7
|
||||
},
|
||||
"type": "cluster",
|
||||
"widget_id": "w4",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "HTML",
|
||||
"origin": "d1",
|
||||
"settings": {
|
||||
"markup": "<h1>foo</h1>",
|
||||
"title": "html"
|
||||
},
|
||||
"type": "html",
|
||||
"widget_id": "w9",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Chart",
|
||||
"origin": "c0",
|
||||
"settings": {
|
||||
"chart_type": "bar",
|
||||
"datapoints": [
|
||||
{
|
||||
"_check_id": 1234,
|
||||
"_metric_type": "numeric",
|
||||
"account_id": "1234",
|
||||
"label": "Used",
|
||||
"metric": "01234567-89ab-cdef-0123-456789abcdef:vm`memory`used"
|
||||
},
|
||||
{
|
||||
"_check_id": 1234,
|
||||
"_metric_type": "numeric",
|
||||
"account_id": "1234",
|
||||
"label": "Free",
|
||||
"metric": "01234567-89ab-cdef-0123-456789abcdef:vm`memory`free"
|
||||
}
|
||||
],
|
||||
"definition": {
|
||||
"datasource": "realtime",
|
||||
"derive": "gauge",
|
||||
"disable_autoformat": false,
|
||||
"formula": "",
|
||||
"legend": {
|
||||
"show": false,
|
||||
"type": "html"
|
||||
},
|
||||
"period": 0,
|
||||
"pop_onhover": false,
|
||||
"wedge_labels": {
|
||||
"on_chart": true,
|
||||
"tooltips": false
|
||||
},
|
||||
"wedge_values": {
|
||||
"angle": "0",
|
||||
"color": "background",
|
||||
"show": true
|
||||
}
|
||||
},
|
||||
"title": "chart graph"
|
||||
},
|
||||
"type": "chart",
|
||||
"widget_id": "w5",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Alerts",
|
||||
"origin": "a0",
|
||||
"settings": {
|
||||
"account_id": "1234",
|
||||
"acknowledged": "all",
|
||||
"cleared": "all",
|
||||
"contact_groups": [
|
||||
],
|
||||
"dependents": "all",
|
||||
"display": "list",
|
||||
"maintenance": "all",
|
||||
"min_age": "0",
|
||||
"off_hours": [
|
||||
17,
|
||||
9
|
||||
],
|
||||
"search": "",
|
||||
"severity": "12345",
|
||||
"tag_filter_set": [
|
||||
],
|
||||
"time_window": "30M",
|
||||
"title": "alerts",
|
||||
"week_days": [
|
||||
"sun",
|
||||
"mon",
|
||||
"tue",
|
||||
"wed",
|
||||
"thu",
|
||||
"fri",
|
||||
"sat"
|
||||
]
|
||||
},
|
||||
"type": "alerts",
|
||||
"widget_id": "w2",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Graph",
|
||||
"origin": "c1",
|
||||
"settings": {
|
||||
"_graph_title": "foo bar / %Used",
|
||||
"account_id": "1234",
|
||||
"date_window": "2w",
|
||||
"graph_id": "01234567-89ab-cdef-0123-456789abcdef",
|
||||
"hide_xaxis": false,
|
||||
"hide_yaxis": false,
|
||||
"key_inline": false,
|
||||
"key_loc": "noop",
|
||||
"key_size": "1",
|
||||
"key_wrap": false,
|
||||
"label": "",
|
||||
"overlay_set_id": "",
|
||||
"period": "2000",
|
||||
"previous_graph_id": "null",
|
||||
"realtime": false,
|
||||
"show_flags": false
|
||||
},
|
||||
"type": "graph",
|
||||
"widget_id": "w8",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "List",
|
||||
"origin": "a2",
|
||||
"settings": {
|
||||
"account_id": "1234",
|
||||
"limit": "10",
|
||||
"search": "",
|
||||
"type": "graph"
|
||||
},
|
||||
"type": "list",
|
||||
"widget_id": "w10",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Status",
|
||||
"origin": "b2",
|
||||
"settings": {
|
||||
"account_id": "1234",
|
||||
"agent_status_settings": {
|
||||
"search": "",
|
||||
"show_agent_types": "both",
|
||||
"show_contact": false,
|
||||
"show_feeds": true,
|
||||
"show_setup": false,
|
||||
"show_skew": true,
|
||||
"show_updates": true
|
||||
},
|
||||
"content_type": "agent_status",
|
||||
"host_status_settings": {
|
||||
"layout_style": "grid",
|
||||
"search": "",
|
||||
"sort_by": "alerts",
|
||||
"tag_filter_set": [
|
||||
]
|
||||
}
|
||||
},
|
||||
"type": "status",
|
||||
"widget_id": "w11",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Text",
|
||||
"origin": "d2",
|
||||
"settings": {
|
||||
"autoformat": false,
|
||||
"body_format": "<p>{metric_name} ({value_type})<br /><strong>{metric_value}</strong><br /><span class=\"date\">{value_date}</span></p>",
|
||||
"datapoints": [
|
||||
{
|
||||
"_cluster_title": "test",
|
||||
"_label": "Cluster: test",
|
||||
"account_id": "1234",
|
||||
"cluster_id": 1234,
|
||||
"numeric_only": false
|
||||
}
|
||||
],
|
||||
"period": 0,
|
||||
"title_format": "Metric Status",
|
||||
"use_default": true,
|
||||
"value_type": "gauge"
|
||||
},
|
||||
"type": "text",
|
||||
"widget_id": "w13",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Chart",
|
||||
"origin": "b0",
|
||||
"settings": {
|
||||
"chart_type": "bar",
|
||||
"datapoints": [
|
||||
{
|
||||
"_cluster_title": "test",
|
||||
"_label": "Cluster: test",
|
||||
"account_id": "1234",
|
||||
"cluster_id": 1234,
|
||||
"numeric_only": true
|
||||
}
|
||||
],
|
||||
"definition": {
|
||||
"datasource": "realtime",
|
||||
"derive": "gauge",
|
||||
"disable_autoformat": false,
|
||||
"formula": "",
|
||||
"legend": {
|
||||
"show": false,
|
||||
"type": "html"
|
||||
},
|
||||
"period": 0,
|
||||
"pop_onhover": false,
|
||||
"wedge_labels": {
|
||||
"on_chart": true,
|
||||
"tooltips": false
|
||||
},
|
||||
"wedge_values": {
|
||||
"angle": "0",
|
||||
"color": "background",
|
||||
"show": true
|
||||
}
|
||||
},
|
||||
"title": "chart metric cluster"
|
||||
},
|
||||
"type": "chart",
|
||||
"widget_id": "w3",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Gauge",
|
||||
"origin": "b1",
|
||||
"settings": {
|
||||
"_check_id": 1234,
|
||||
"account_id": "1234",
|
||||
"check_uuid": "01234567-89ab-cdef-0123-456789abcdef",
|
||||
"disable_autoformat": false,
|
||||
"formula": "",
|
||||
"metric_display_name": "%Used",
|
||||
"metric_name": "fs`/foo`df_used_percent",
|
||||
"period": 0,
|
||||
"range_high": 100,
|
||||
"range_low": 0,
|
||||
"thresholds": {
|
||||
"colors": [
|
||||
"#008000",
|
||||
"#ffcc00",
|
||||
"#ee0000"
|
||||
],
|
||||
"flip": false,
|
||||
"values": [
|
||||
"75%",
|
||||
"87.5%"
|
||||
]
|
||||
},
|
||||
"title": "Metric Gauge",
|
||||
"type": "bar",
|
||||
"value_type": "gauge"
|
||||
},
|
||||
"type": "gauge",
|
||||
"widget_id": "w7",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Text",
|
||||
"origin": "c2",
|
||||
"settings": {
|
||||
"autoformat": false,
|
||||
"body_format": "<p>{metric_name} ({value_type})<br /><strong>{metric_value}</strong><br /><span class=\"date\">{value_date}</span></p>",
|
||||
"datapoints": [
|
||||
{
|
||||
"_check_id": 1234,
|
||||
"_metric_type": "numeric",
|
||||
"account_id": "1234",
|
||||
"label": "cache entries",
|
||||
"metric": "01234567-89ab-cdef-0123-456789abcdef:foo`cache_entries"
|
||||
},
|
||||
{
|
||||
"_check_id": 1234,
|
||||
"_metric_type": "numeric",
|
||||
"account_id": "1234",
|
||||
"label": "cache capacity",
|
||||
"metric": "01234567-89ab-cdef-0123-456789abcdef:foo`cache_capacity"
|
||||
},
|
||||
{
|
||||
"_check_id": 1234,
|
||||
"_metric_type": "numeric",
|
||||
"account_id": "1234",
|
||||
"label": "cache size",
|
||||
"metric": "01234567-89ab-cdef-0123-456789abcdef:foo`cache_size"
|
||||
}
|
||||
],
|
||||
"period": 0,
|
||||
"title_format": "Metric Status",
|
||||
"use_default": true,
|
||||
"value_type": "gauge"
|
||||
},
|
||||
"type": "text",
|
||||
"widget_id": "w12",
|
||||
"width": 1
|
||||
},
|
||||
{
|
||||
"active": true,
|
||||
"height": 1,
|
||||
"name": "Forecast",
|
||||
"origin": "a1",
|
||||
"settings": {
|
||||
"format": "standard",
|
||||
"resource_limit": "0",
|
||||
"resource_usage": "metric:average(\"01234567-89ab-cdef-0123-456789abcdef\",p\"fs%60/foo%60df_used_percent\")",
|
||||
"thresholds": {
|
||||
"colors": [
|
||||
"#008000",
|
||||
"#ffcc00",
|
||||
"#ee0000"
|
||||
],
|
||||
"values": [
|
||||
"1d",
|
||||
"1h"
|
||||
]
|
||||
},
|
||||
"title": "Resource Forecast",
|
||||
"trend": "auto"
|
||||
},
|
||||
"type": "forecast",
|
||||
"widget_id": "w6",
|
||||
"width": 1
|
||||
}
|
||||
]
|
||||
}
|
||||
399
vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go
generated
vendored
Normal file
399
vendor/github.com/circonus-labs/circonus-gometrics/api/dashboard.go
generated
vendored
Normal file
@@ -0,0 +1,399 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Dashboard API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/dashboard
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// DashboardGridLayout defines the dashboard's grid dimensions (rows x columns).
type DashboardGridLayout struct {
	Height uint `json:"height"`
	Width  uint `json:"width"`
}

// DashboardAccessConfig defines a shared-access configuration for a dashboard.
type DashboardAccessConfig struct {
	BlackDash           bool   `json:"black_dash,omitempty"`
	Enabled             bool   `json:"enabled,omitempty"`
	Fullscreen          bool   `json:"fullscreen,omitempty"`
	FullscreenHideTitle bool   `json:"fullscreen_hide_title,omitempty"`
	Nickname            string `json:"nickname,omitempty"`
	ScaleText           bool   `json:"scale_text,omitempty"`
	SharedID            string `json:"shared_id,omitempty"`
	TextSize            uint   `json:"text_size,omitempty"`
}

// DashboardOptions defines dashboard display options.
type DashboardOptions struct {
	AccessConfigs       []DashboardAccessConfig `json:"access_configs,omitempty"`
	FullscreenHideTitle bool                    `json:"fullscreen_hide_title,omitempty"`
	HideGrid            bool                    `json:"hide_grid,omitempty"`
	Linkages            [][]string              `json:"linkages,omitempty"`
	ScaleText           bool                    `json:"scale_text,omitempty"`
	TextSize            uint                    `json:"text_size,omitempty"`
}
|
||||
|
||||
// ChartTextWidgetDatapoint defines a datapoint for chart and text widgets.
// Trailing comments indicate which datapoint flavor uses each field.
type ChartTextWidgetDatapoint struct {
	AccountID    string `json:"account_id,omitempty"`     // metric cluster, metric
	CheckID      uint   `json:"_check_id,omitempty"`      // metric
	ClusterID    uint   `json:"cluster_id,omitempty"`     // metric cluster
	ClusterTitle string `json:"_cluster_title,omitempty"` // metric cluster
	Label        string `json:"label,omitempty"`          // metric
	Label2       string `json:"_label,omitempty"`         // metric cluster
	Metric       string `json:"metric,omitempty"`         // metric
	MetricType   string `json:"_metric_type,omitempty"`   // metric
	NumericOnly  bool   `json:"numeric_only,omitempty"`   // metric cluster
}

// ChartWidgetDefinitionLegend defines the legend portion of a chart widget definition.
type ChartWidgetDefinitionLegend struct {
	Show bool   `json:"show,omitempty"`
	Type string `json:"type,omitempty"`
}

// ChartWidgetWedgeLabels defines wedge label display flags for a chart widget.
type ChartWidgetWedgeLabels struct {
	OnChart  bool `json:"on_chart,omitempty"`
	ToolTips bool `json:"tooltips,omitempty"`
}

// ChartWidgetWedgeValues defines wedge value display settings for a chart widget.
type ChartWidgetWedgeValues struct {
	Angle string `json:"angle,omitempty"`
	Color string `json:"color,omitempty"`
	Show  bool   `json:"show,omitempty"`
}

// ChartWidgtDefinition defines a chart widget definition.
// NOTE: the type name is misspelled ("Widgt"); it is kept as-is because it is
// part of the exported API and renaming would break callers.
type ChartWidgtDefinition struct {
	Datasource        string                      `json:"datasource,omitempty"`
	Derive            string                      `json:"derive,omitempty"`
	DisableAutoformat bool                        `json:"disable_autoformat,omitempty"`
	Formula           string                      `json:"formula,omitempty"`
	Legend            ChartWidgetDefinitionLegend `json:"legend,omitempty"`
	Period            uint                        `json:"period,omitempty"`
	PopOnHover        bool                        `json:"pop_onhover,omitempty"`
	WedgeLabels       ChartWidgetWedgeLabels      `json:"wedge_labels,omitempty"`
	WedgeValues       ChartWidgetWedgeValues      `json:"wedge_values,omitempty"`
}

// ForecastGaugeWidgetThresholds defines thresholds for forecast and gauge widgets.
type ForecastGaugeWidgetThresholds struct {
	Colors []string `json:"colors,omitempty"` // forecasts, gauges
	Flip   bool     `json:"flip,omitempty"`   // gauges
	Values []string `json:"values,omitempty"` // forecasts, gauges
}
|
||||
|
||||
// StatusWidgetAgentStatusSettings defines agent status settings for a status widget.
type StatusWidgetAgentStatusSettings struct {
	Search         string `json:"search,omitempty"`
	ShowAgentTypes string `json:"show_agent_types,omitempty"`
	ShowContact    bool   `json:"show_contact,omitempty"`
	ShowFeeds      bool   `json:"show_feeds,omitempty"`
	ShowSetup      bool   `json:"show_setup,omitempty"`
	ShowSkew       bool   `json:"show_skew,omitempty"`
	ShowUpdates    bool   `json:"show_updates,omitempty"`
}

// StatusWidgetHostStatusSettings defines host status settings for a status widget.
type StatusWidgetHostStatusSettings struct {
	LayoutStyle  string   `json:"layout_style,omitempty"`
	Search       string   `json:"search,omitempty"`
	SortBy       string   `json:"sort_by,omitempty"`
	TagFilterSet []string `json:"tag_filter_set,omitempty"`
}
|
||||
|
||||
// DashboardWidgetSettings defines settings specific to a widget.
// The struct is a union over all widget types; the trailing comment on each
// field lists the widget type(s) that use it.
type DashboardWidgetSettings struct {
	AccountID           string                          `json:"account_id,omitempty"`            // alerts, clusters, gauges, graphs, lists, status
	Acknowledged        string                          `json:"acknowledged,omitempty"`          // alerts
	AgentStatusSettings StatusWidgetAgentStatusSettings `json:"agent_status_settings,omitempty"` // status
	Algorithm           string                          `json:"algorithm,omitempty"`             // clusters
	Autoformat          bool                            `json:"autoformat,omitempty"`            // text
	BodyFormat          string                          `json:"body_format,omitempty"`           // text
	ChartType           string                          `json:"chart_type,omitempty"`            // charts
	CheckUUID           string                          `json:"check_uuid,omitempty"`            // gauges
	Cleared             string                          `json:"cleared,omitempty"`               // alerts
	ClusterID           uint                            `json:"cluster_id,omitempty"`            // clusters
	ClusterName         string                          `json:"cluster_name,omitempty"`          // clusters
	ContactGroups       []uint                          `json:"contact_groups,omitempty"`        // alerts
	ContentType         string                          `json:"content_type,omitempty"`          // status
	Datapoints          []ChartTextWidgetDatapoint      `json:"datapoints,omitempty"`            // charts, text
	DateWindow          string                          `json:"date_window,omitempty"`           // graphs
	Definition          ChartWidgtDefinition            `json:"definition,omitempty"`            // charts
	Dependents          string                          `json:"dependents,omitempty"`            // alerts
	DisableAutoformat   bool                            `json:"disable_autoformat,omitempty"`    // gauges
	Display             string                          `json:"display,omitempty"`               // alerts
	Format              string                          `json:"format,omitempty"`                // forecasts
	Formula             string                          `json:"formula,omitempty"`               // gauges
	GraphUUID           string                          `json:"graph_id,omitempty"`              // graphs
	HideXAxis           bool                            `json:"hide_xaxis,omitempty"`            // graphs
	HideYAxis           bool                            `json:"hide_yaxis,omitempty"`            // graphs
	HostStatusSettings  StatusWidgetHostStatusSettings  `json:"host_status_settings,omitempty"`  // status
	KeyInline           bool                            `json:"key_inline,omitempty"`            // graphs
	KeyLoc              string                          `json:"key_loc,omitempty"`               // graphs
	KeySize             string                          `json:"key_size,omitempty"`              // graphs
	KeyWrap             bool                            `json:"key_wrap,omitempty"`              // graphs
	Label               string                          `json:"label,omitempty"`                 // graphs
	Layout              string                          `json:"layout,omitempty"`                // clusters
	Limit               string                          `json:"limit,omitempty"`                 // lists
	Maintenance         string                          `json:"maintenance,omitempty"`           // alerts
	Markup              string                          `json:"markup,omitempty"`                // html
	MetricDisplayName   string                          `json:"metric_display_name,omitempty"`   // gauges
	MetricName          string                          `json:"metric_name,omitempty"`           // gauges
	MinAge              string                          `json:"min_age,omitempty"`               // alerts
	OffHours            []uint                          `json:"off_hours,omitempty"`             // alerts
	OverlaySetID        string                          `json:"overlay_set_id,omitempty"`        // graphs
	// Period is interface{} because the API's type varies by widget type.
	Period        interface{}                   `json:"period,omitempty"`         // BUG type switching between widgets (doc: string; gauges, text: uint; graphs: string)
	RangeHigh     int                           `json:"range_high,omitempty"`     // gauges
	RangeLow      int                           `json:"range_low,omitempty"`      // gauges
	Realtime      bool                          `json:"realtime,omitempty"`       // graphs
	ResourceLimit string                        `json:"resource_limit,omitempty"` // forecasts
	ResourceUsage string                        `json:"resource_usage,omitempty"` // forecasts
	Search        string                        `json:"search,omitempty"`         // alerts, lists
	Severity      string                        `json:"severity,omitempty"`       // alerts
	ShowFlags     bool                          `json:"show_flags,omitempty"`     // graphs
	Size          string                        `json:"size,omitempty"`           // clusters
	TagFilterSet  []string                      `json:"tag_filter_set,omitempty"` // alerts
	Threshold     float32                       `json:"threshold,omitempty"`      // clusters
	Thresholds    ForecastGaugeWidgetThresholds `json:"thresholds,omitempty"`     // forecasts, gauges
	TimeWindow    string                        `json:"time_window,omitempty"`    // alerts
	Title         string                        `json:"title,omitempty"`          // alerts, charts, forecasts, gauges, html
	TitleFormat   string                        `json:"title_format,omitempty"`   // text
	Trend         string                        `json:"trend,omitempty"`          // forecasts
	Type          string                        `json:"type,omitempty"`           // gauges, lists
	UseDefault    bool                          `json:"use_default,omitempty"`    // text
	ValueType     string                        `json:"value_type,omitempty"`     // gauges, text
	WeekDays      []string                      `json:"weekdays,omitempty"`       // alerts
}
|
||||
|
||||
// DashboardWidget defines a single widget placed on a dashboard.
type DashboardWidget struct {
	Active   bool                    `json:"active"`
	Height   uint                    `json:"height"`
	Name     string                  `json:"name"`
	Origin   string                  `json:"origin"`
	Settings DashboardWidgetSettings `json:"settings"`
	Type     string                  `json:"type"`
	WidgetID string                  `json:"widget_id"`
	Width    uint                    `json:"width"`
}

// Dashboard defines a dashboard. See https://login.circonus.com/resources/api/calls/dashboard for more information.
// NOTE(review): underscore-prefixed JSON fields (_cid, _created, ...) appear to be
// assigned by the API rather than by callers — confirm against the API docs.
type Dashboard struct {
	AccountDefault bool                `json:"account_default"`
	Active         bool                `json:"_active,omitempty"`
	CID            string              `json:"_cid,omitempty"`
	Created        uint                `json:"_created,omitempty"`
	CreatedBy      string              `json:"_created_by,omitempty"`
	GridLayout     DashboardGridLayout `json:"grid_layout"`
	LastModified   uint                `json:"_last_modified,omitempty"`
	Options        DashboardOptions    `json:"options"`
	Shared         bool                `json:"shared"`
	Title          string              `json:"title"`
	UUID           string              `json:"_dashboard_uuid,omitempty"`
	Widgets        []DashboardWidget   `json:"widgets"`
}
|
||||
|
||||
// NewDashboard returns a new Dashboard (with defaults, if applicable)
|
||||
func NewDashboard() *Dashboard {
|
||||
return &Dashboard{}
|
||||
}
|
||||
|
||||
// FetchDashboard retrieves dashboard with passed cid.
|
||||
func (a *API) FetchDashboard(cid CIDType) (*Dashboard, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid dashboard CID [none]")
|
||||
}
|
||||
|
||||
dashboardCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(string(*cid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch dashboard, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
dashboard := new(Dashboard)
|
||||
if err := json.Unmarshal(result, dashboard); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dashboard, nil
|
||||
}
|
||||
|
||||
// FetchDashboards retrieves all dashboards available to the API Token.
|
||||
func (a *API) FetchDashboards() (*[]Dashboard, error) {
|
||||
result, err := a.Get(config.DashboardPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var dashboards []Dashboard
|
||||
if err := json.Unmarshal(result, &dashboards); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &dashboards, nil
|
||||
}
|
||||
|
||||
// UpdateDashboard updates passed dashboard.
|
||||
func (a *API) UpdateDashboard(cfg *Dashboard) (*Dashboard, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid dashboard config [nil]")
|
||||
}
|
||||
|
||||
dashboardCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update dashboard, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(dashboardCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dashboard := &Dashboard{}
|
||||
if err := json.Unmarshal(result, dashboard); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dashboard, nil
|
||||
}
|
||||
|
||||
// CreateDashboard creates a new dashboard.
|
||||
func (a *API) CreateDashboard(cfg *Dashboard) (*Dashboard, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid dashboard config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create dashboard, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.DashboardPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
dashboard := &Dashboard{}
|
||||
if err := json.Unmarshal(result, dashboard); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return dashboard, nil
|
||||
}
|
||||
|
||||
// DeleteDashboard deletes passed dashboard.
|
||||
func (a *API) DeleteDashboard(cfg *Dashboard) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid dashboard config [nil]")
|
||||
}
|
||||
return a.DeleteDashboardByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteDashboardByCID deletes dashboard with passed cid.
|
||||
func (a *API) DeleteDashboardByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid dashboard CID [none]")
|
||||
}
|
||||
|
||||
dashboardCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.DashboardCIDRegex, dashboardCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid dashboard CID [%s]", dashboardCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(dashboardCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchDashboards returns dashboards matching the specified
|
||||
// search query and/or filter. If nil is passed for both parameters
|
||||
// all dashboards will be returned.
|
||||
func (a *API) SearchDashboards(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Dashboard, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchDashboards()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.DashboardPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var dashboards []Dashboard
|
||||
if err := json.Unmarshal(result, &dashboards); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &dashboards, nil
|
||||
}
|
||||
63
vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go
generated
vendored
Normal file
63
vendor/github.com/circonus-labs/circonus-gometrics/api/doc.go
generated
vendored
Normal file
@@ -0,0 +1,63 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

/*
Package api provides methods for interacting with the Circonus API. See the full Circonus API
Documentation at https://login.circonus.com/resources/api for more information.

Raw REST methods

    Get    - retrieve existing item(s)
    Put    - update an existing item
    Post   - create a new item
    Delete - remove an existing item

Endpoints (supported)

    Account                 https://login.circonus.com/resources/api/calls/account
    Acknowledgement         https://login.circonus.com/resources/api/calls/acknowledgement
    Alert                   https://login.circonus.com/resources/api/calls/alert
    Annotation              https://login.circonus.com/resources/api/calls/annotation
    Broker                  https://login.circonus.com/resources/api/calls/broker
    Check                   https://login.circonus.com/resources/api/calls/check
    Check Bundle            https://login.circonus.com/resources/api/calls/check_bundle
    Check Bundle Metrics    https://login.circonus.com/resources/api/calls/check_bundle_metrics
    Contact Group           https://login.circonus.com/resources/api/calls/contact_group
    Dashboard               https://login.circonus.com/resources/api/calls/dashboard
    Graph                   https://login.circonus.com/resources/api/calls/graph
    Maintenance [window]    https://login.circonus.com/resources/api/calls/maintenance
    Metric                  https://login.circonus.com/resources/api/calls/metric
    Metric Cluster          https://login.circonus.com/resources/api/calls/metric_cluster
    Outlier Report          https://login.circonus.com/resources/api/calls/outlier_report
    Provision Broker        https://login.circonus.com/resources/api/calls/provision_broker
    Rule Set                https://login.circonus.com/resources/api/calls/rule_set
    Rule Set Group          https://login.circonus.com/resources/api/calls/rule_set_group
    User                    https://login.circonus.com/resources/api/calls/user
    Worksheet               https://login.circonus.com/resources/api/calls/worksheet

Endpoints (not supported)

Support may be added for these endpoints in the future. These endpoints may currently be used
directly with the Raw REST methods above.

    CAQL        https://login.circonus.com/resources/api/calls/caql
    Check Move  https://login.circonus.com/resources/api/calls/check_move
    Data        https://login.circonus.com/resources/api/calls/data
    Snapshot    https://login.circonus.com/resources/api/calls/snapshot
    Tag         https://login.circonus.com/resources/api/calls/tag
    Template    https://login.circonus.com/resources/api/calls/template

Verbs

    Fetch  singular/plural item(s) - e.g. FetchAnnotation, FetchAnnotations
    Create create new item         - e.g. CreateAnnotation
    Update update an item          - e.g. UpdateAnnotation
    Delete remove an item          - e.g. DeleteAnnotation, DeleteAnnotationByCID
    Search search for item(s)      - e.g. SearchAnnotations
    New    new item config         - e.g. NewAnnotation (returns an empty item,
                                     any applicable defaults defined)

Not all endpoints support all verbs.
*/
package api
|
||||
348
vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go
generated
vendored
Normal file
348
vendor/github.com/circonus-labs/circonus-gometrics/api/graph.go
generated
vendored
Normal file
@@ -0,0 +1,348 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Graph API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/graph
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// GraphAccessKey defines an access key for a graph
type GraphAccessKey struct {
	Active         bool   `json:"active,omitempty"`           // boolean
	Height         uint   `json:"height,omitempty"`           // uint
	Key            string `json:"key,omitempty"`              // string
	Legend         bool   `json:"legend,omitempty"`           // boolean
	LockDate       bool   `json:"lock_date,omitempty"`        // boolean
	LockMode       string `json:"lock_mode,omitempty"`        // string
	LockRangeEnd   uint   `json:"lock_range_end,omitempty"`   // uint
	LockRangeStart uint   `json:"lock_range_start,omitempty"` // uint
	LockShowTimes  bool   `json:"lock_show_times,omitempty"`  // boolean
	LockZoom       string `json:"lock_zoom,omitempty"`        // string
	Nickname       string `json:"nickname,omitempty"`         // string
	Title          bool   `json:"title,omitempty"`            // boolean
	Width          uint   `json:"width,omitempty"`            // uint
	XLabels        bool   `json:"x_labels,omitempty"`         // boolean
	YLabels        bool   `json:"y_labels,omitempty"`         // boolean
}

// GraphComposite defines a composite datapoint on a graph.
// Pointer fields distinguish JSON null from the zero value.
type GraphComposite struct {
	Axis          string  `json:"axis,omitempty"`           // string
	Color         string  `json:"color,omitempty"`          // string
	DataFormula   *string `json:"data_formula,omitempty"`   // string or null
	Hidden        bool    `json:"hidden,omitempty"`         // boolean
	LegendFormula *string `json:"legend_formula,omitempty"` // string or null
	Name          string  `json:"name,omitempty"`           // string
	Stack         *uint   `json:"stack,omitempty"`          // uint or null
}
|
||||
|
||||
// GraphDatapoint defines a datapoint on a graph.
type GraphDatapoint struct {
	Alpha         string      `json:"alpha,omitempty"`          // string
	Axis          string      `json:"axis,omitempty"`           // string
	CAQL          *string     `json:"caql,omitempty"`           // string or null
	CheckID       uint        `json:"check_id,omitempty"`       // uint
	Color         string      `json:"color,omitempty"`          // string
	DataFormula   *string     `json:"data_formula,omitempty"`   // string or null
	Derive        interface{} `json:"derive,omitempty"`         // BUG doc: string, api: string or boolean(for caql statements)
	Hidden        bool        `json:"hidden,omitempty"`         // boolean
	LegendFormula *string     `json:"legend_formula,omitempty"` // string or null
	MetricName    string      `json:"metric_name,omitempty"`    // string
	MetricType    string      `json:"metric_type,omitempty"`    // string
	Name          string      `json:"name,omitempty"`           // string
	Stack         *uint       `json:"stack,omitempty"`          // uint or null
}

// GraphGuide defines a guide line on a graph.
type GraphGuide struct {
	Color         string  `json:"color,omitempty"`          // string
	DataFormula   *string `json:"data_formula,omitempty"`   // string or null
	Hidden        bool    `json:"hidden,omitempty"`         // boolean
	LegendFormula *string `json:"legend_formula,omitempty"` // string or null
	Name          string  `json:"name,omitempty"`           // string
}

// GraphMetricCluster defines a metric cluster datapoint on a graph.
type GraphMetricCluster struct {
	AggregateFunc string  `json:"aggregation_function,omitempty"` // string
	Axis          string  `json:"axis,omitempty"`                 // string
	DataFormula   *string `json:"data_formula,omitempty"`         // string or null
	Hidden        bool    `json:"hidden,omitempty"`               // boolean
	LegendFormula *string `json:"legend_formula,omitempty"`       // string or null
	MetricCluster string  `json:"metric_cluster,omitempty"`       // string
	Name          string  `json:"name,omitempty"`                 // string
	Stack         *uint   `json:"stack,omitempty"`                // uint or null
}
|
||||
|
||||
// OverlayDataOptions defines overlay options for data. Note, each overlay type requires
// a _subset_ of the options. See Graph API documentation (URL above) for details.
type OverlayDataOptions struct {
	Alerts        string `json:"alerts,omitempty"`        // string BUG doc: numeric, api: string
	ArrayOutput   string `json:"array_output,omitempty"`  // string BUG doc: numeric, api: string
	BasePeriod    string `json:"base_period,omitempty"`   // string BUG doc: numeric, api: string
	Delay         string `json:"delay,omitempty"`         // string BUG doc: numeric, api: string
	Extension     string `json:"extension,omitempty"`     // string
	GraphTitle    string `json:"graph_title,omitempty"`   // string
	GraphUUID     string `json:"graph_id,omitempty"`      // string
	InPercent     string `json:"in_percent,omitempty"`    // string BUG doc: boolean, api: string
	Inverse       string `json:"inverse,omitempty"`       // string BUG doc: numeric, api: string
	Method        string `json:"method,omitempty"`        // string
	Model         string `json:"model,omitempty"`         // string
	ModelEnd      string `json:"model_end,omitempty"`     // string
	ModelPeriod   string `json:"model_period,omitempty"`  // string
	ModelRelative string `json:"model_relative,omitempty"` // string BUG doc: numeric, api: string
	Out           string `json:"out,omitempty"`           // string
	Prequel       string `json:"prequel,omitempty"`       // string
	Presets       string `json:"presets,omitempty"`       // string
	Quantiles     string `json:"quantiles,omitempty"`     // string
	SeasonLength  string `json:"season_length,omitempty"` // string BUG doc: numeric, api: string
	Sensitivity   string `json:"sensitivity,omitempty"`   // string BUG doc: numeric, api: string
	SingleValue   string `json:"single_value,omitempty"`  // string BUG doc: numeric, api: string
	TargetPeriod  string `json:"target_period,omitempty"` // string
	TimeOffset    string `json:"time_offset,omitempty"`   // string
	TimeShift     string `json:"time_shift,omitempty"`    // string BUG doc: numeric, api: string
	Transform     string `json:"transform,omitempty"`     // string
	Version       string `json:"version,omitempty"`       // string BUG doc: numeric, api: string
	Window        string `json:"window,omitempty"`        // string BUG doc: numeric, api: string
	XShift        string `json:"x_shift,omitempty"`       // string
}

// OverlayUISpecs defines UI specs for an overlay.
type OverlayUISpecs struct {
	Decouple bool   `json:"decouple,omitempty"` // boolean
	ID       string `json:"id,omitempty"`       // string
	Label    string `json:"label,omitempty"`    // string
	Type     string `json:"type,omitempty"`     // string
	Z        string `json:"z,omitempty"`        // string BUG doc: numeric, api: string
}

// GraphOverlaySet defines a set of overlays for a graph.
type GraphOverlaySet struct {
	DataOpts OverlayDataOptions `json:"data_opts,omitempty"` // OverlayDataOptions
	ID       string             `json:"id,omitempty"`        // string
	Title    string             `json:"title,omitempty"`     // string
	UISpecs  OverlayUISpecs     `json:"ui_specs,omitempty"`  // OverlayUISpecs
}
|
||||
|
||||
// Graph defines a graph. See https://login.circonus.com/resources/api/calls/graph for more information.
|
||||
type Graph struct {
|
||||
AccessKeys []GraphAccessKey `json:"access_keys,omitempty"` // [] len >= 0
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
Composites []GraphComposite `json:"composites,omitempty"` // [] len >= 0
|
||||
Datapoints []GraphDatapoint `json:"datapoints,omitempt"` // [] len >= 0
|
||||
Description string `json:"description,omitempty"` // string
|
||||
Guides []GraphGuide `json:"guides,omitempty"` // [] len >= 0
|
||||
LineStyle string `json:"line_style,omitempty"` // string
|
||||
LogLeftY int `json:"logarithmitc_left_y,omitempty"` // string or null BUG doc: number (not string)
|
||||
LogRightY int `json:"logarithmitc_right_y,omitempty"` // string or null BUG doc: number (not string)
|
||||
MaxLeftY *string `json:"max_left_y,omitempty"` // string or null BUG doc: number (not string)
|
||||
MaxRightY *string `json:"max_right_y,omitempty"` // string or null BUG doc: number (not string)
|
||||
MetricClusters []GraphMetricCluster `json:"metric_clusters,omitempty"` // [] len >= 0
|
||||
MinLeftY *string `json:"min_left_y,omitempty"` // string or null BUG doc: number (not string)
|
||||
MinRightY *string `json:"min_right_y,omitempty"` // string or null BUG doc: number (not string)
|
||||
Notes *string `json:"notes,omitempty"` // string or null
|
||||
OverlaySets *map[string]GraphOverlaySet `json:"overlay_sets,omitempty"` // GroupOverLaySets or null
|
||||
Style string `json:"style,omitempty"` // string
|
||||
Tags []string `json:"tags,omitempty"` // [] len >= 0
|
||||
Title string `json:"title,omitempty"` // string
|
||||
}
|
||||
|
||||
// NewGraph returns a Graph (with defaults, if applicable)
|
||||
func NewGraph() *Graph {
|
||||
return &Graph{}
|
||||
}
|
||||
|
||||
// FetchGraph retrieves graph with passed cid.
|
||||
func (a *API) FetchGraph(cid CIDType) (*Graph, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid graph CID [none]")
|
||||
}
|
||||
|
||||
graphCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid graph CID [%s]", graphCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(graphCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch graph, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
graph := new(Graph)
|
||||
if err := json.Unmarshal(result, graph); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return graph, nil
|
||||
}
|
||||
|
||||
// FetchGraphs retrieves all graphs available to the API Token.
|
||||
func (a *API) FetchGraphs() (*[]Graph, error) {
|
||||
result, err := a.Get(config.GraphPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var graphs []Graph
|
||||
if err := json.Unmarshal(result, &graphs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &graphs, nil
|
||||
}
|
||||
|
||||
// UpdateGraph updates passed graph.
|
||||
func (a *API) UpdateGraph(cfg *Graph) (*Graph, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid graph config [nil]")
|
||||
}
|
||||
|
||||
graphCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid graph CID [%s]", graphCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update graph, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(graphCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
graph := &Graph{}
|
||||
if err := json.Unmarshal(result, graph); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return graph, nil
|
||||
}
|
||||
|
||||
// CreateGraph creates a new graph.
|
||||
func (a *API) CreateGraph(cfg *Graph) (*Graph, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid graph config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update graph, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.GraphPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
graph := &Graph{}
|
||||
if err := json.Unmarshal(result, graph); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return graph, nil
|
||||
}
|
||||
|
||||
// DeleteGraph deletes passed graph.
|
||||
func (a *API) DeleteGraph(cfg *Graph) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid graph config [nil]")
|
||||
}
|
||||
return a.DeleteGraphByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteGraphByCID deletes graph with passed cid.
|
||||
func (a *API) DeleteGraphByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid graph CID [none]")
|
||||
}
|
||||
|
||||
graphCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.GraphCIDRegex, graphCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid graph CID [%s]", graphCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(graphCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchGraphs returns graphs matching the specified search query
|
||||
// and/or filter. If nil is passed for both parameters all graphs
|
||||
// will be returned.
|
||||
func (a *API) SearchGraphs(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Graph, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchGraphs()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.GraphPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var graphs []Graph
|
||||
if err := json.Unmarshal(result, &graphs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &graphs, nil
|
||||
}
|
||||
220
vendor/github.com/circonus-labs/circonus-gometrics/api/maintenance.go
generated
vendored
Normal file
220
vendor/github.com/circonus-labs/circonus-gometrics/api/maintenance.go
generated
vendored
Normal file
@@ -0,0 +1,220 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Maintenance window API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/maintenance
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// Maintenance defines a maintenance window. See https://login.circonus.com/resources/api/calls/maintenance for more information.
|
||||
type Maintenance struct {
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
Item string `json:"item,omitempty"` // string
|
||||
Notes string `json:"notes,omitempty"` // string
|
||||
Severities interface{} `json:"severities,omitempty"` // []string NOTE can be set with CSV string or []string
|
||||
Start uint `json:"start,omitempty"` // uint
|
||||
Stop uint `json:"stop,omitempty"` // uint
|
||||
Tags []string `json:"tags,omitempty"` // [] len >= 0
|
||||
Type string `json:"type,omitempty"` // string
|
||||
}
|
||||
|
||||
// NewMaintenanceWindow returns a new Maintenance window (with defaults, if applicable)
|
||||
func NewMaintenanceWindow() *Maintenance {
|
||||
return &Maintenance{}
|
||||
}
|
||||
|
||||
// FetchMaintenanceWindow retrieves maintenance [window] with passed cid.
|
||||
func (a *API) FetchMaintenanceWindow(cid CIDType) (*Maintenance, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid maintenance window CID [none]")
|
||||
}
|
||||
|
||||
maintenanceCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(maintenanceCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch maintenance window, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
window := &Maintenance{}
|
||||
if err := json.Unmarshal(result, window); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return window, nil
|
||||
}
|
||||
|
||||
// FetchMaintenanceWindows retrieves all maintenance [windows] available to API Token.
|
||||
func (a *API) FetchMaintenanceWindows() (*[]Maintenance, error) {
|
||||
result, err := a.Get(config.MaintenancePrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var windows []Maintenance
|
||||
if err := json.Unmarshal(result, &windows); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &windows, nil
|
||||
}
|
||||
|
||||
// UpdateMaintenanceWindow updates passed maintenance [window].
|
||||
func (a *API) UpdateMaintenanceWindow(cfg *Maintenance) (*Maintenance, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid maintenance window config [nil]")
|
||||
}
|
||||
|
||||
maintenanceCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update maintenance window, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(maintenanceCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
window := &Maintenance{}
|
||||
if err := json.Unmarshal(result, window); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return window, nil
|
||||
}
|
||||
|
||||
// CreateMaintenanceWindow creates a new maintenance [window].
|
||||
func (a *API) CreateMaintenanceWindow(cfg *Maintenance) (*Maintenance, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid maintenance window config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create maintenance window, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.MaintenancePrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
window := &Maintenance{}
|
||||
if err := json.Unmarshal(result, window); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return window, nil
|
||||
}
|
||||
|
||||
// DeleteMaintenanceWindow deletes passed maintenance [window].
|
||||
func (a *API) DeleteMaintenanceWindow(cfg *Maintenance) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid maintenance window config [nil]")
|
||||
}
|
||||
return a.DeleteMaintenanceWindowByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteMaintenanceWindowByCID deletes maintenance [window] with passed cid.
|
||||
func (a *API) DeleteMaintenanceWindowByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid maintenance window CID [none]")
|
||||
}
|
||||
|
||||
maintenanceCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.MaintenanceCIDRegex, maintenanceCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid maintenance window CID [%s]", maintenanceCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(maintenanceCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchMaintenanceWindows returns maintenance [windows] matching
|
||||
// the specified search query and/or filter. If nil is passed for
|
||||
// both parameters all maintenance [windows] will be returned.
|
||||
func (a *API) SearchMaintenanceWindows(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Maintenance, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchMaintenanceWindows()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.MaintenancePrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var windows []Maintenance
|
||||
if err := json.Unmarshal(result, &windows); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &windows, nil
|
||||
}
|
||||
162
vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go
generated
vendored
Normal file
162
vendor/github.com/circonus-labs/circonus-gometrics/api/metric.go
generated
vendored
Normal file
@@ -0,0 +1,162 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Metric API support - Fetch, Create*, Update, Delete*, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/metric
|
||||
// * : create and delete are handled via check_bundle or check_bundle_metrics
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// Metric defines a metric. See https://login.circonus.com/resources/api/calls/metric for more information.
|
||||
type Metric struct {
|
||||
Active bool `json:"_active,omitempty"` // boolean
|
||||
CheckActive bool `json:"_check_active,omitempty"` // boolean
|
||||
CheckBundleCID string `json:"_check_bundle,omitempty"` // string
|
||||
CheckCID string `json:"_check,omitempty"` // string
|
||||
CheckTags []string `json:"_check_tags,omitempty"` // [] len >= 0
|
||||
CheckUUID string `json:"_check_uuid,omitempty"` // string
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
Histogram bool `json:"_histogram,omitempty"` // boolean
|
||||
Link *string `json:"link,omitempty"` // string or null
|
||||
MetricName string `json:"_metric_name,omitempty"` // string
|
||||
MetricType string `json:"_metric_type,omitempty"` // string
|
||||
Notes *string `json:"notes,omitempty"` // string or null
|
||||
Tags []string `json:"tags,omitempty"` // [] len >= 0
|
||||
Units *string `json:"units,omitempty"` // string or null
|
||||
}
|
||||
|
||||
// FetchMetric retrieves metric with passed cid.
|
||||
func (a *API) FetchMetric(cid CIDType) (*Metric, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid metric CID [none]")
|
||||
}
|
||||
|
||||
metricCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.MetricCIDRegex, metricCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid metric CID [%s]", metricCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(metricCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch metric, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
metric := &Metric{}
|
||||
if err := json.Unmarshal(result, metric); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return metric, nil
|
||||
}
|
||||
|
||||
// FetchMetrics retrieves all metrics available to API Token.
|
||||
func (a *API) FetchMetrics() (*[]Metric, error) {
|
||||
result, err := a.Get(config.MetricPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var metrics []Metric
|
||||
if err := json.Unmarshal(result, &metrics); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &metrics, nil
|
||||
}
|
||||
|
||||
// UpdateMetric updates passed metric.
|
||||
func (a *API) UpdateMetric(cfg *Metric) (*Metric, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid metric config [nil]")
|
||||
}
|
||||
|
||||
metricCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.MetricCIDRegex, metricCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid metric CID [%s]", metricCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update metric, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(metricCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
metric := &Metric{}
|
||||
if err := json.Unmarshal(result, metric); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return metric, nil
|
||||
}
|
||||
|
||||
// SearchMetrics returns metrics matching the specified search query
|
||||
// and/or filter. If nil is passed for both parameters all metrics
|
||||
// will be returned.
|
||||
func (a *API) SearchMetrics(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Metric, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchMetrics()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.MetricPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var metrics []Metric
|
||||
if err := json.Unmarshal(result, &metrics); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &metrics, nil
|
||||
}
|
||||
276
vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go
generated
vendored
276
vendor/github.com/circonus-labs/circonus-gometrics/api/metric_cluster.go
generated
vendored
@@ -2,6 +2,9 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Metric Cluster API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/metric_cluster
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
@@ -9,6 +12,8 @@ import (
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// MetricQuery object
|
||||
@@ -17,38 +22,40 @@ type MetricQuery struct {
|
||||
Type string `json:"type"`
|
||||
}
|
||||
|
||||
// MetricCluster object
|
||||
// MetricCluster defines a metric cluster. See https://login.circonus.com/resources/api/calls/metric_cluster for more information.
|
||||
type MetricCluster struct {
|
||||
CID string `json:"_cid,omitempty"`
|
||||
MatchingMetrics []string `json:"_matching_metrics,omitempty"`
|
||||
MatchingUUIDMetrics map[string][]string `json:"_matching_uuid_metrics,omitempty"`
|
||||
Description string `json:"description"`
|
||||
Name string `json:"name"`
|
||||
Queries []MetricQuery `json:"queries"`
|
||||
Tags []string `json:"tags"`
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
Description string `json:"description"` // string
|
||||
MatchingMetrics []string `json:"_matching_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set)
|
||||
MatchingUUIDMetrics map[string][]string `json:"_matching_uuid_metrics,omitempty"` // [] len >= 1 (result info only, if query has extras - cannot be set)
|
||||
Name string `json:"name"` // string
|
||||
Queries []MetricQuery `json:"queries"` // [] len >= 1
|
||||
Tags []string `json:"tags"` // [] len >= 0
|
||||
}
|
||||
|
||||
const baseMetricClusterPath = "/metric_cluster"
|
||||
// NewMetricCluster returns a new MetricCluster (with defaults, if applicable)
|
||||
func NewMetricCluster() *MetricCluster {
|
||||
return &MetricCluster{}
|
||||
}
|
||||
|
||||
// FetchMetricClusterByID fetch a metric cluster configuration by id
|
||||
func (a *API) FetchMetricClusterByID(id IDType, extras string) (*MetricCluster, error) {
|
||||
reqURL := url.URL{
|
||||
Path: fmt.Sprintf("%s/%d", baseMetricClusterPath, id),
|
||||
// FetchMetricCluster retrieves metric cluster with passed cid.
|
||||
func (a *API) FetchMetricCluster(cid CIDType, extras string) (*MetricCluster, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid metric cluster CID [none]")
|
||||
}
|
||||
cid := CIDType(reqURL.String())
|
||||
return a.FetchMetricClusterByCID(cid, extras)
|
||||
}
|
||||
|
||||
// FetchMetricClusterByCID fetch a check bundle configuration by id
|
||||
func (a *API) FetchMetricClusterByCID(cid CIDType, extras string) (*MetricCluster, error) {
|
||||
if matched, err := regexp.MatchString("^"+baseMetricClusterPath+"/[0-9]+$", string(cid)); err != nil {
|
||||
clusterCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
} else if !matched {
|
||||
return nil, fmt.Errorf("Invalid metric cluster CID %v", cid)
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: string(cid),
|
||||
Path: clusterCID,
|
||||
}
|
||||
|
||||
extra := ""
|
||||
@@ -65,95 +72,190 @@ func (a *API) FetchMetricClusterByCID(cid CIDType, extras string) (*MetricCluste
|
||||
reqURL.RawQuery = q.Encode()
|
||||
}
|
||||
|
||||
resp, err := a.Get(reqURL.String())
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch metric cluster, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
cluster := &MetricCluster{}
|
||||
if err := json.Unmarshal(resp, cluster); err != nil {
|
||||
if err := json.Unmarshal(result, cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cluster, nil
|
||||
}
|
||||
|
||||
// MetricClusterSearch returns list of metric clusters matching a search query (or all metric
|
||||
// clusters if no search query is provided)
|
||||
// - a search query not a filter (see: https://login.circonus.com/resources/api#searching)
|
||||
func (a *API) MetricClusterSearch(searchCriteria SearchQueryType) ([]MetricCluster, error) {
|
||||
// FetchMetricClusters retrieves all metric clusters available to API Token.
|
||||
func (a *API) FetchMetricClusters(extras string) (*[]MetricCluster, error) {
|
||||
reqURL := url.URL{
|
||||
Path: baseMetricClusterPath,
|
||||
Path: config.MetricClusterPrefix,
|
||||
}
|
||||
|
||||
if searchCriteria != "" {
|
||||
extra := ""
|
||||
switch extras {
|
||||
case "metrics":
|
||||
extra = "_matching_metrics"
|
||||
case "uuids":
|
||||
extra = "_matching_uuid_metrics"
|
||||
}
|
||||
|
||||
if extra != "" {
|
||||
q := url.Values{}
|
||||
q.Set("search", string(searchCriteria))
|
||||
q.Set("extra", extra)
|
||||
reqURL.RawQuery = q.Encode()
|
||||
}
|
||||
|
||||
resp, err := a.Get(reqURL.String())
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var clusters []MetricCluster
|
||||
if err := json.Unmarshal(result, &clusters); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &clusters, nil
|
||||
}
|
||||
|
||||
// UpdateMetricCluster updates passed metric cluster.
|
||||
func (a *API) UpdateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid metric cluster config [nil]")
|
||||
}
|
||||
|
||||
clusterCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update metric cluster, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(clusterCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cluster := &MetricCluster{}
|
||||
if err := json.Unmarshal(result, cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cluster, nil
|
||||
}
|
||||
|
||||
// CreateMetricCluster creates a new metric cluster.
|
||||
func (a *API) CreateMetricCluster(cfg *MetricCluster) (*MetricCluster, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid metric cluster config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create metric cluster, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.MetricClusterPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cluster := &MetricCluster{}
|
||||
if err := json.Unmarshal(result, cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cluster, nil
|
||||
}
|
||||
|
||||
// DeleteMetricCluster deletes passed metric cluster.
|
||||
func (a *API) DeleteMetricCluster(cfg *MetricCluster) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid metric cluster config [nil]")
|
||||
}
|
||||
return a.DeleteMetricClusterByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteMetricClusterByCID deletes metric cluster with passed cid.
|
||||
func (a *API) DeleteMetricClusterByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid metric cluster CID [none]")
|
||||
}
|
||||
|
||||
clusterCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.MetricClusterCIDRegex, clusterCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid metric cluster CID [%s]", clusterCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(clusterCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchMetricClusters returns metric clusters matching the specified
|
||||
// search query and/or filter. If nil is passed for both parameters
|
||||
// all metric clusters will be returned.
|
||||
func (a *API) SearchMetricClusters(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]MetricCluster, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchMetricClusters("")
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.MetricClusterPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var clusters []MetricCluster
|
||||
if err := json.Unmarshal(resp, &clusters); err != nil {
|
||||
if err := json.Unmarshal(result, &clusters); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return clusters, nil
|
||||
}
|
||||
|
||||
// CreateMetricCluster create a new metric cluster
|
||||
func (a *API) CreateMetricCluster(config *MetricCluster) (*MetricCluster, error) {
|
||||
reqURL := url.URL{
|
||||
Path: baseMetricClusterPath,
|
||||
}
|
||||
cfg, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := a.Post(reqURL.String(), cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cluster := &MetricCluster{}
|
||||
if err := json.Unmarshal(resp, cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cluster, nil
|
||||
}
|
||||
|
||||
// UpdateMetricCluster updates a metric cluster
|
||||
func (a *API) UpdateMetricCluster(config *MetricCluster) (*MetricCluster, error) {
|
||||
if matched, err := regexp.MatchString("^"+baseMetricClusterPath+"/[0-9]+$", string(config.CID)); err != nil {
|
||||
return nil, err
|
||||
} else if !matched {
|
||||
return nil, fmt.Errorf("Invalid metric cluster CID %v", config.CID)
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.CID,
|
||||
}
|
||||
|
||||
cfg, err := json.Marshal(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resp, err := a.Put(reqURL.String(), cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cluster := &MetricCluster{}
|
||||
if err := json.Unmarshal(resp, cluster); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cluster, nil
|
||||
return &clusters, nil
|
||||
}
|
||||
|
||||
221
vendor/github.com/circonus-labs/circonus-gometrics/api/outlier_report.go
generated
vendored
Normal file
221
vendor/github.com/circonus-labs/circonus-gometrics/api/outlier_report.go
generated
vendored
Normal file
@@ -0,0 +1,221 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// OutlierReport API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/report
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// OutlierReport defines a outlier report. See https://login.circonus.com/resources/api/calls/report for more information.
|
||||
type OutlierReport struct {
|
||||
CID string `json:"_cid,omitempty"` // string
|
||||
Config string `json:"config,omitempty"` // string
|
||||
Created uint `json:"_created,omitempty"` // uint
|
||||
CreatedBy string `json:"_created_by,omitempty"` // string
|
||||
LastModified uint `json:"_last_modified,omitempty"` // uint
|
||||
LastModifiedBy string `json:"_last_modified_by,omitempty"` // string
|
||||
MetricClusterCID string `json:"metric_cluster,omitempty"` // st ring
|
||||
Tags []string `json:"tags,omitempty"` // [] len >= 0
|
||||
Title string `json:"title,omitempty"` // string
|
||||
}
|
||||
|
||||
// NewOutlierReport returns a new OutlierReport (with defaults, if applicable)
|
||||
func NewOutlierReport() *OutlierReport {
|
||||
return &OutlierReport{}
|
||||
}
|
||||
|
||||
// FetchOutlierReport retrieves outlier report with passed cid.
|
||||
func (a *API) FetchOutlierReport(cid CIDType) (*OutlierReport, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid outlier report CID [none]")
|
||||
}
|
||||
|
||||
reportCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid outlier report CID [%s]", reportCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(reportCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch outlier report, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
report := &OutlierReport{}
|
||||
if err := json.Unmarshal(result, report); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return report, nil
|
||||
}
|
||||
|
||||
// FetchOutlierReports retrieves all outlier reports available to API Token.
|
||||
func (a *API) FetchOutlierReports() (*[]OutlierReport, error) {
|
||||
result, err := a.Get(config.OutlierReportPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var reports []OutlierReport
|
||||
if err := json.Unmarshal(result, &reports); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &reports, nil
|
||||
}
|
||||
|
||||
// UpdateOutlierReport updates passed outlier report.
|
||||
func (a *API) UpdateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid outlier report config [nil]")
|
||||
}
|
||||
|
||||
reportCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid outlier report CID [%s]", reportCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update outlier report, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(reportCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
report := &OutlierReport{}
|
||||
if err := json.Unmarshal(result, report); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return report, nil
|
||||
}
|
||||
|
||||
// CreateOutlierReport creates a new outlier report.
|
||||
func (a *API) CreateOutlierReport(cfg *OutlierReport) (*OutlierReport, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid outlier report config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create outlier report, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.OutlierReportPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
report := &OutlierReport{}
|
||||
if err := json.Unmarshal(result, report); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return report, nil
|
||||
}
|
||||
|
||||
// DeleteOutlierReport deletes passed outlier report.
|
||||
func (a *API) DeleteOutlierReport(cfg *OutlierReport) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid outlier report config [nil]")
|
||||
}
|
||||
return a.DeleteOutlierReportByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteOutlierReportByCID deletes outlier report with passed cid.
|
||||
func (a *API) DeleteOutlierReportByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid outlier report CID [none]")
|
||||
}
|
||||
|
||||
reportCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.OutlierReportCIDRegex, reportCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid outlier report CID [%s]", reportCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(reportCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchOutlierReports returns outlier report matching the
|
||||
// specified search query and/or filter. If nil is passed for
|
||||
// both parameters all outlier report will be returned.
|
||||
func (a *API) SearchOutlierReports(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]OutlierReport, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchOutlierReports()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.OutlierReportPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var reports []OutlierReport
|
||||
if err := json.Unmarshal(result, &reports); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &reports, nil
|
||||
}
|
||||
151
vendor/github.com/circonus-labs/circonus-gometrics/api/provision_broker.go
generated
vendored
Normal file
151
vendor/github.com/circonus-labs/circonus-gometrics/api/provision_broker.go
generated
vendored
Normal file
@@ -0,0 +1,151 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// ProvisionBroker API support - Fetch, Create, and Update
|
||||
// See: https://login.circonus.com/resources/api/calls/provision_broker
|
||||
// Note that the provision_broker endpoint does not return standard cid format
|
||||
// of '/object/item' (e.g. /provision_broker/abc-123) it just returns 'item'
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// BrokerStratcon defines stratcons for broker.
// Fields mirror the JSON payload of the provision_broker endpoint;
// empty fields are omitted on marshal.
type BrokerStratcon struct {
	CN   string `json:"cn,omitempty"`   // string
	Host string `json:"host,omitempty"` // string
	Port string `json:"port,omitempty"` // string
}
|
||||
|
||||
// ProvisionBroker defines a provision broker [request]. See https://login.circonus.com/resources/api/calls/provision_broker for more details.
// Underscore-prefixed JSON fields (_cert, _cid, _csr, _stratcons) are
// populated by the API; the remainder are request attributes.
type ProvisionBroker struct {
	Cert                    string           `json:"_cert,omitempty"`                     // string
	CID                     string           `json:"_cid,omitempty"`                      // string
	CSR                     string           `json:"_csr,omitempty"`                      // string
	ExternalHost            string           `json:"external_host,omitempty"`             // string
	ExternalPort            string           `json:"external_port,omitempty"`             // string
	IPAddress               string           `json:"ipaddress,omitempty"`                 // string
	Latitude                string           `json:"latitude,omitempty"`                  // string
	Longitude               string           `json:"longitude,omitempty"`                 // string
	Name                    string           `json:"noit_name,omitempty"`                 // string
	Port                    string           `json:"port,omitempty"`                      // string
	PreferReverseConnection bool             `json:"prefer_reverse_connection,omitempty"` // boolean
	Rebuild                 bool             `json:"rebuild,omitempty"`                   // boolean
	Stratcons               []BrokerStratcon `json:"_stratcons,omitempty"`                // [] len >= 1
	Tags                    []string         `json:"tags,omitempty"`                      // [] len >= 0
}
|
||||
|
||||
// NewProvisionBroker returns a new ProvisionBroker (with defaults, if applicable)
|
||||
func NewProvisionBroker() *ProvisionBroker {
|
||||
return &ProvisionBroker{}
|
||||
}
|
||||
|
||||
// FetchProvisionBroker retrieves provision broker [request] with passed cid.
|
||||
func (a *API) FetchProvisionBroker(cid CIDType) (*ProvisionBroker, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid provision broker request CID [none]")
|
||||
}
|
||||
|
||||
brokerCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.ProvisionBrokerCIDRegex, brokerCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid provision broker request CID [%s]", brokerCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(brokerCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch broker provision request, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
broker := &ProvisionBroker{}
|
||||
if err := json.Unmarshal(result, broker); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return broker, nil
|
||||
}
|
||||
|
||||
// UpdateProvisionBroker updates a broker definition [request].
|
||||
func (a *API) UpdateProvisionBroker(cid CIDType, cfg *ProvisionBroker) (*ProvisionBroker, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid provision broker request config [nil]")
|
||||
}
|
||||
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid provision broker request CID [none]")
|
||||
}
|
||||
|
||||
brokerCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.ProvisionBrokerCIDRegex, brokerCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid provision broker request CID [%s]", brokerCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update broker provision request, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(brokerCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
broker := &ProvisionBroker{}
|
||||
if err := json.Unmarshal(result, broker); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return broker, nil
|
||||
}
|
||||
|
||||
// CreateProvisionBroker creates a new provison broker [request].
|
||||
func (a *API) CreateProvisionBroker(cfg *ProvisionBroker) (*ProvisionBroker, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid provision broker request config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create broker provision request, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.ProvisionBrokerPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
broker := &ProvisionBroker{}
|
||||
if err := json.Unmarshal(result, broker); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return broker, nil
|
||||
}
|
||||
234
vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set.go
generated
vendored
Normal file
234
vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set.go
generated
vendored
Normal file
@@ -0,0 +1,234 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Rule Set API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/rule_set
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// RuleSetRule defines a ruleset rule.
// WindowingFunction is a pointer so the JSON value may be null.
type RuleSetRule struct {
	Criteria          string  `json:"criteria"`                     // string
	Severity          uint    `json:"severity"`                     // uint
	Value             string  `json:"value"`                        // string
	Wait              uint    `json:"wait,omitempty"`               // uint
	WindowingDuration uint    `json:"windowing_duration,omitempty"` // uint
	WindowingFunction *string `json:"windowing_function,omitempty"` // string or null
}
|
||||
|
||||
// RuleSet defines a ruleset. See https://login.circonus.com/resources/api/calls/rule_set for more information.
// Pointer-typed fields correspond to JSON values that may be null.
// NOTE(review): ContactGroups keys presumably index severity levels
// (the API docs note "len 5") -- confirm against the endpoint docs.
type RuleSet struct {
	CheckCID      string             `json:"check"`            // string
	CID           string             `json:"_cid,omitempty"`   // string
	ContactGroups map[uint8][]string `json:"contact_groups"`   // [] len 5
	Derive        *string            `json:"derive,omitempty"` // string or null
	Link          *string            `json:"link"`             // string or null
	MetricName    string             `json:"metric_name"`      // string
	MetricTags    []string           `json:"metric_tags"`      // [] len >= 0
	MetricType    string             `json:"metric_type"`      // string
	Notes         *string            `json:"notes"`            // string or null
	Parent        *string            `json:"parent,omitempty"` // string or null
	Rules         []RuleSetRule      `json:"rules"`            // [] len >= 1
	Tags          []string           `json:"tags"`             // [] len >= 0
}
|
||||
|
||||
// NewRuleSet returns a new RuleSet (with defaults if applicable)
|
||||
func NewRuleSet() *RuleSet {
|
||||
return &RuleSet{}
|
||||
}
|
||||
|
||||
// FetchRuleSet retrieves rule set with passed cid.
|
||||
func (a *API) FetchRuleSet(cid CIDType) (*RuleSet, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid rule set CID [none]")
|
||||
}
|
||||
|
||||
rulesetCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(rulesetCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch rule set, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
ruleset := &RuleSet{}
|
||||
if err := json.Unmarshal(result, ruleset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ruleset, nil
|
||||
}
|
||||
|
||||
// FetchRuleSets retrieves all rule sets available to API Token.
|
||||
func (a *API) FetchRuleSets() (*[]RuleSet, error) {
|
||||
result, err := a.Get(config.RuleSetPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var rulesets []RuleSet
|
||||
if err := json.Unmarshal(result, &rulesets); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &rulesets, nil
|
||||
}
|
||||
|
||||
// UpdateRuleSet updates passed rule set.
|
||||
func (a *API) UpdateRuleSet(cfg *RuleSet) (*RuleSet, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid rule set config [nil]")
|
||||
}
|
||||
|
||||
rulesetCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update rule set, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(rulesetCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ruleset := &RuleSet{}
|
||||
if err := json.Unmarshal(result, ruleset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ruleset, nil
|
||||
}
|
||||
|
||||
// CreateRuleSet creates a new rule set.
|
||||
func (a *API) CreateRuleSet(cfg *RuleSet) (*RuleSet, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid rule set config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create rule set, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
resp, err := a.Post(config.RuleSetPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ruleset := &RuleSet{}
|
||||
if err := json.Unmarshal(resp, ruleset); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return ruleset, nil
|
||||
}
|
||||
|
||||
// DeleteRuleSet deletes passed rule set.
|
||||
func (a *API) DeleteRuleSet(cfg *RuleSet) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid rule set config [nil]")
|
||||
}
|
||||
return a.DeleteRuleSetByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteRuleSetByCID deletes rule set with passed cid.
|
||||
func (a *API) DeleteRuleSetByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid rule set CID [none]")
|
||||
}
|
||||
|
||||
rulesetCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.RuleSetCIDRegex, rulesetCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid rule set CID [%s]", rulesetCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(rulesetCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchRuleSets returns rule sets matching the specified search
|
||||
// query and/or filter. If nil is passed for both parameters all
|
||||
// rule sets will be returned.
|
||||
func (a *API) SearchRuleSets(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]RuleSet, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchRuleSets()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.RuleSetPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var rulesets []RuleSet
|
||||
if err := json.Unmarshal(result, &rulesets); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &rulesets, nil
|
||||
}
|
||||
231
vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go
generated
vendored
Normal file
231
vendor/github.com/circonus-labs/circonus-gometrics/api/rule_set_group.go
generated
vendored
Normal file
@@ -0,0 +1,231 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// RuleSetGroup API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/rule_set_group
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// RuleSetGroupFormula defines a formula for raising alerts.
// Expression is interface{} because the API accepts either a string
// or a numeric value (the docs say string; the API also returns numerics).
type RuleSetGroupFormula struct {
	Expression    interface{} `json:"expression"` // string or uint BUG doc: string, api: string or numeric
	RaiseSeverity uint        `json:"raise_severity"` // uint
	Wait          uint        `json:"wait"`           // uint
}
|
||||
|
||||
// RuleSetGroupCondition defines conditions for raising alerts.
// NOTE(review): the JSON tag "matching_serverities" (sic) matches the
// field name used on the wire -- do not "fix" the spelling here.
type RuleSetGroupCondition struct {
	MatchingSeverities []string `json:"matching_serverities"` // [] len >= 1
	RuleSetCID         string   `json:"rule_set"`             // string
}
|
||||
|
||||
// RuleSetGroup defines a ruleset group. See https://login.circonus.com/resources/api/calls/rule_set_group for more information.
// CID is populated by the API and omitted from create requests.
type RuleSetGroup struct {
	CID               string                  `json:"_cid,omitempty"`      // string
	ContactGroups     map[uint8][]string      `json:"contact_groups"`      // [] len == 5
	Formulas          []RuleSetGroupFormula   `json:"formulas"`            // [] len >= 0
	Name              string                  `json:"name"`                // string
	RuleSetConditions []RuleSetGroupCondition `json:"rule_set_conditions"` // [] len >= 1
	Tags              []string                `json:"tags"`                // [] len >= 0
}
|
||||
|
||||
// NewRuleSetGroup returns a new RuleSetGroup (with defaults, if applicable)
|
||||
func NewRuleSetGroup() *RuleSetGroup {
|
||||
return &RuleSetGroup{}
|
||||
}
|
||||
|
||||
// FetchRuleSetGroup retrieves rule set group with passed cid.
|
||||
func (a *API) FetchRuleSetGroup(cid CIDType) (*RuleSetGroup, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid rule set group CID [none]")
|
||||
}
|
||||
|
||||
groupCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid rule set group CID [%s]", groupCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(groupCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch rule set group, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
rulesetGroup := &RuleSetGroup{}
|
||||
if err := json.Unmarshal(result, rulesetGroup); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return rulesetGroup, nil
|
||||
}
|
||||
|
||||
// FetchRuleSetGroups retrieves all rule set groups available to API Token.
|
||||
func (a *API) FetchRuleSetGroups() (*[]RuleSetGroup, error) {
|
||||
result, err := a.Get(config.RuleSetGroupPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var rulesetGroups []RuleSetGroup
|
||||
if err := json.Unmarshal(result, &rulesetGroups); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &rulesetGroups, nil
|
||||
}
|
||||
|
||||
// UpdateRuleSetGroup updates passed rule set group.
|
||||
func (a *API) UpdateRuleSetGroup(cfg *RuleSetGroup) (*RuleSetGroup, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid rule set group config [nil]")
|
||||
}
|
||||
|
||||
groupCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid rule set group CID [%s]", groupCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update rule set group, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(groupCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
groups := &RuleSetGroup{}
|
||||
if err := json.Unmarshal(result, groups); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return groups, nil
|
||||
}
|
||||
|
||||
// CreateRuleSetGroup creates a new rule set group.
|
||||
func (a *API) CreateRuleSetGroup(cfg *RuleSetGroup) (*RuleSetGroup, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid rule set group config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create rule set group, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.RuleSetGroupPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
group := &RuleSetGroup{}
|
||||
if err := json.Unmarshal(result, group); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return group, nil
|
||||
}
|
||||
|
||||
// DeleteRuleSetGroup deletes passed rule set group.
|
||||
func (a *API) DeleteRuleSetGroup(cfg *RuleSetGroup) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid rule set group config [nil]")
|
||||
}
|
||||
return a.DeleteRuleSetGroupByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteRuleSetGroupByCID deletes rule set group wiht passed cid.
|
||||
func (a *API) DeleteRuleSetGroupByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid rule set group CID [none]")
|
||||
}
|
||||
|
||||
groupCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.RuleSetGroupCIDRegex, groupCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid rule set group CID [%s]", groupCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(groupCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchRuleSetGroups returns rule set groups matching the
|
||||
// specified search query and/or filter. If nil is passed for
|
||||
// both parameters all rule set groups will be returned.
|
||||
func (a *API) SearchRuleSetGroups(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]RuleSetGroup, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchRuleSetGroups()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.RuleSetGroupPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var groups []RuleSetGroup
|
||||
if err := json.Unmarshal(result, &groups); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &groups, nil
|
||||
}
|
||||
159
vendor/github.com/circonus-labs/circonus-gometrics/api/user.go
generated
vendored
Normal file
159
vendor/github.com/circonus-labs/circonus-gometrics/api/user.go
generated
vendored
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// User API support - Fetch, Update, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/user
|
||||
// Note: Create and Delete are not supported directly via the User API
|
||||
// endpoint. See the Account endpoint for inviting and removing users
|
||||
// from specific accounts.
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// UserContactInfo defines known contact details.
// Empty fields are omitted when marshaled.
type UserContactInfo struct {
	SMS  string `json:"sms,omitempty"`  // string
	XMPP string `json:"xmpp,omitempty"` // string
}
|
||||
|
||||
// User defines a user. See https://login.circonus.com/resources/api/calls/user for more information.
// CID is populated by the API; the remaining fields are user attributes.
type User struct {
	CID         string          `json:"_cid,omitempty"`         // string
	ContactInfo UserContactInfo `json:"contact_info,omitempty"` // UserContactInfo
	Email       string          `json:"email"`                  // string
	Firstname   string          `json:"firstname"`              // string
	Lastname    string          `json:"lastname"`               // string
}
|
||||
|
||||
// FetchUser retrieves user with passed cid. Pass nil for '/user/current'.
|
||||
func (a *API) FetchUser(cid CIDType) (*User, error) {
|
||||
var userCID string
|
||||
|
||||
if cid == nil || *cid == "" {
|
||||
userCID = config.UserPrefix + "/current"
|
||||
} else {
|
||||
userCID = string(*cid)
|
||||
}
|
||||
|
||||
matched, err := regexp.MatchString(config.UserCIDRegex, userCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid user CID [%s]", userCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(userCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch user, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
user := new(User)
|
||||
if err := json.Unmarshal(result, user); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return user, nil
|
||||
}
|
||||
|
||||
// FetchUsers retrieves all users available to API Token.
|
||||
func (a *API) FetchUsers() (*[]User, error) {
|
||||
result, err := a.Get(config.UserPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var users []User
|
||||
if err := json.Unmarshal(result, &users); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &users, nil
|
||||
}
|
||||
|
||||
// UpdateUser updates passed user.
|
||||
func (a *API) UpdateUser(cfg *User) (*User, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid user config [nil]")
|
||||
}
|
||||
|
||||
userCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.UserCIDRegex, userCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid user CID [%s]", userCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update user, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(userCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
user := &User{}
|
||||
if err := json.Unmarshal(result, user); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return user, nil
|
||||
}
|
||||
|
||||
// SearchUsers returns users matching a filter (search queries
|
||||
// are not suppoted by the user endpoint). Pass nil as filter for all
|
||||
// users available to the API Token.
|
||||
func (a *API) SearchUsers(filterCriteria *SearchFilterType) (*[]User, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchUsers()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.UserPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var users []User
|
||||
if err := json.Unmarshal(result, &users); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &users, nil
|
||||
}
|
||||
232
vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go
generated
vendored
Normal file
232
vendor/github.com/circonus-labs/circonus-gometrics/api/worksheet.go
generated
vendored
Normal file
@@ -0,0 +1,232 @@
|
||||
// Copyright 2016 Circonus, Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Worksheet API support - Fetch, Create, Update, Delete, and Search
|
||||
// See: https://login.circonus.com/resources/api/calls/worksheet
|
||||
|
||||
package api
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"regexp"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// WorksheetGraph defines a worksheet cid to be included in the worksheet.
type WorksheetGraph struct {
	GraphCID string `json:"graph"` // string
}
|
||||
|
||||
// WorksheetSmartQuery defines a query to include multiple worksheets.
type WorksheetSmartQuery struct {
	Name  string   `json:"name"`
	Order []string `json:"order"`
	Query string   `json:"query"`
}
|
||||
|
||||
// Worksheet defines a worksheet. See https://login.circonus.com/resources/api/calls/worksheet for more information.
// Pointer-typed fields correspond to JSON values that may be null.
// Note the Graphs field maps to the "worksheets" JSON key.
type Worksheet struct {
	CID          string                `json:"_cid,omitempty"`          // string
	Description  *string               `json:"description"`             // string or null
	Favorite     bool                  `json:"favorite"`                // boolean
	Graphs       []WorksheetGraph      `json:"worksheets,omitempty"`    // [] len >= 0
	Notes        *string               `json:"notes"`                   // string or null
	SmartQueries []WorksheetSmartQuery `json:"smart_queries,omitempty"` // [] len >= 0
	Tags         []string              `json:"tags"`                    // [] len >= 0
	Title        string                `json:"title"`                   // string
}
|
||||
|
||||
// NewWorksheet returns a new Worksheet (with defaults, if applicable)
|
||||
func NewWorksheet() *Worksheet {
|
||||
return &Worksheet{}
|
||||
}
|
||||
|
||||
// FetchWorksheet retrieves worksheet with passed cid.
|
||||
func (a *API) FetchWorksheet(cid CIDType) (*Worksheet, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return nil, fmt.Errorf("Invalid worksheet CID [none]")
|
||||
}
|
||||
|
||||
worksheetCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
|
||||
}
|
||||
|
||||
result, err := a.Get(string(*cid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] fetch worksheet, received JSON: %s", string(result))
|
||||
}
|
||||
|
||||
worksheet := new(Worksheet)
|
||||
if err := json.Unmarshal(result, worksheet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return worksheet, nil
|
||||
}
|
||||
|
||||
// FetchWorksheets retrieves all worksheets available to API Token.
|
||||
func (a *API) FetchWorksheets() (*[]Worksheet, error) {
|
||||
result, err := a.Get(config.WorksheetPrefix)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var worksheets []Worksheet
|
||||
if err := json.Unmarshal(result, &worksheets); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &worksheets, nil
|
||||
}
|
||||
|
||||
// UpdateWorksheet updates passed worksheet.
|
||||
func (a *API) UpdateWorksheet(cfg *Worksheet) (*Worksheet, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid worksheet config [nil]")
|
||||
}
|
||||
|
||||
worksheetCID := string(cfg.CID)
|
||||
|
||||
matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !matched {
|
||||
return nil, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] update worksheet, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Put(worksheetCID, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
worksheet := &Worksheet{}
|
||||
if err := json.Unmarshal(result, worksheet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return worksheet, nil
|
||||
}
|
||||
|
||||
// CreateWorksheet creates a new worksheet.
|
||||
func (a *API) CreateWorksheet(cfg *Worksheet) (*Worksheet, error) {
|
||||
if cfg == nil {
|
||||
return nil, fmt.Errorf("Invalid worksheet config [nil]")
|
||||
}
|
||||
|
||||
jsonCfg, err := json.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if a.Debug {
|
||||
a.Log.Printf("[DEBUG] create annotation, sending JSON: %s", string(jsonCfg))
|
||||
}
|
||||
|
||||
result, err := a.Post(config.WorksheetPrefix, jsonCfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
worksheet := &Worksheet{}
|
||||
if err := json.Unmarshal(result, worksheet); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return worksheet, nil
|
||||
}
|
||||
|
||||
// DeleteWorksheet deletes passed worksheet.
|
||||
func (a *API) DeleteWorksheet(cfg *Worksheet) (bool, error) {
|
||||
if cfg == nil {
|
||||
return false, fmt.Errorf("Invalid worksheet config [nil]")
|
||||
}
|
||||
return a.DeleteWorksheetByCID(CIDType(&cfg.CID))
|
||||
}
|
||||
|
||||
// DeleteWorksheetByCID deletes worksheet with passed cid.
|
||||
func (a *API) DeleteWorksheetByCID(cid CIDType) (bool, error) {
|
||||
if cid == nil || *cid == "" {
|
||||
return false, fmt.Errorf("Invalid worksheet CID [none]")
|
||||
}
|
||||
|
||||
worksheetCID := string(*cid)
|
||||
|
||||
matched, err := regexp.MatchString(config.WorksheetCIDRegex, worksheetCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !matched {
|
||||
return false, fmt.Errorf("Invalid worksheet CID [%s]", worksheetCID)
|
||||
}
|
||||
|
||||
_, err = a.Delete(worksheetCID)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// SearchWorksheets returns worksheets matching the specified search
|
||||
// query and/or filter. If nil is passed for both parameters all
|
||||
// worksheets will be returned.
|
||||
func (a *API) SearchWorksheets(searchCriteria *SearchQueryType, filterCriteria *SearchFilterType) (*[]Worksheet, error) {
|
||||
q := url.Values{}
|
||||
|
||||
if searchCriteria != nil && *searchCriteria != "" {
|
||||
q.Set("search", string(*searchCriteria))
|
||||
}
|
||||
|
||||
if filterCriteria != nil && len(*filterCriteria) > 0 {
|
||||
for filter, criteria := range *filterCriteria {
|
||||
for _, val := range criteria {
|
||||
q.Add(filter, val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if q.Encode() == "" {
|
||||
return a.FetchWorksheets()
|
||||
}
|
||||
|
||||
reqURL := url.URL{
|
||||
Path: config.WorksheetPrefix,
|
||||
RawQuery: q.Encode(),
|
||||
}
|
||||
|
||||
result, err := a.Get(reqURL.String())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("[ERROR] API call error %+v", err)
|
||||
}
|
||||
|
||||
var worksheets []Worksheet
|
||||
if err := json.Unmarshal(result, &worksheets); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &worksheets, nil
|
||||
}
|
||||
36
vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go
generated
vendored
36
vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/broker.go
generated
vendored
@@ -24,7 +24,8 @@ func init() {
|
||||
// Get Broker to use when creating a check
|
||||
func (cm *CheckManager) getBroker() (*api.Broker, error) {
|
||||
if cm.brokerID != 0 {
|
||||
broker, err := cm.apih.FetchBrokerByID(cm.brokerID)
|
||||
cid := fmt.Sprintf("/broker/%d", cm.brokerID)
|
||||
broker, err := cm.apih.FetchBroker(api.CIDType(&cid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -60,7 +61,7 @@ func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLTyp
|
||||
cn := ""
|
||||
|
||||
for _, detail := range broker.Details {
|
||||
if detail.IP == host {
|
||||
if *detail.IP == host {
|
||||
cn = detail.CN
|
||||
break
|
||||
}
|
||||
@@ -77,29 +78,32 @@ func (cm *CheckManager) getBrokerCN(broker *api.Broker, submissionURL api.URLTyp
|
||||
// Select a broker for use when creating a check, if a specific broker
|
||||
// was not specified.
|
||||
func (cm *CheckManager) selectBroker() (*api.Broker, error) {
|
||||
var brokerList []api.Broker
|
||||
var brokerList *[]api.Broker
|
||||
var err error
|
||||
|
||||
if len(cm.brokerSelectTag) > 0 {
|
||||
brokerList, err = cm.apih.FetchBrokerListByTag(cm.brokerSelectTag)
|
||||
filter := api.SearchFilterType{
|
||||
"f__tags_has": cm.brokerSelectTag,
|
||||
}
|
||||
brokerList, err = cm.apih.SearchBrokers(nil, &filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
brokerList, err = cm.apih.FetchBrokerList()
|
||||
brokerList, err = cm.apih.FetchBrokers()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if len(brokerList) == 0 {
|
||||
if len(*brokerList) == 0 {
|
||||
return nil, fmt.Errorf("zero brokers found")
|
||||
}
|
||||
|
||||
validBrokers := make(map[string]api.Broker)
|
||||
haveEnterprise := false
|
||||
|
||||
for _, broker := range brokerList {
|
||||
for _, broker := range *brokerList {
|
||||
if cm.isValidBroker(&broker) {
|
||||
validBrokers[broker.CID] = broker
|
||||
if broker.Type == "enterprise" {
|
||||
@@ -117,7 +121,7 @@ func (cm *CheckManager) selectBroker() (*api.Broker, error) {
|
||||
}
|
||||
|
||||
if len(validBrokers) == 0 {
|
||||
return nil, fmt.Errorf("found %d broker(s), zero are valid", len(brokerList))
|
||||
return nil, fmt.Errorf("found %d broker(s), zero are valid", len(*brokerList))
|
||||
}
|
||||
|
||||
validBrokerKeys := reflect.ValueOf(validBrokers).MapKeys()
|
||||
@@ -146,8 +150,8 @@ func (cm *CheckManager) brokerSupportsCheckType(checkType CheckTypeType, details
|
||||
|
||||
// Is the broker valid (active, supports check type, and reachable)
|
||||
func (cm *CheckManager) isValidBroker(broker *api.Broker) bool {
|
||||
brokerHost := ""
|
||||
brokerPort := ""
|
||||
var brokerHost string
|
||||
var brokerPort string
|
||||
valid := false
|
||||
for _, detail := range broker.Details {
|
||||
|
||||
@@ -168,19 +172,19 @@ func (cm *CheckManager) isValidBroker(broker *api.Broker) bool {
|
||||
}
|
||||
|
||||
if detail.ExternalPort != 0 {
|
||||
brokerPort = strconv.Itoa(detail.ExternalPort)
|
||||
brokerPort = strconv.Itoa(int(detail.ExternalPort))
|
||||
} else {
|
||||
if detail.Port != 0 {
|
||||
brokerPort = strconv.Itoa(detail.Port)
|
||||
if *detail.Port != 0 {
|
||||
brokerPort = strconv.Itoa(int(*detail.Port))
|
||||
} else {
|
||||
brokerPort = "43191"
|
||||
}
|
||||
}
|
||||
|
||||
if detail.ExternalHost != "" {
|
||||
brokerHost = detail.ExternalHost
|
||||
if detail.ExternalHost != nil && *detail.ExternalHost != "" {
|
||||
brokerHost = *detail.ExternalHost
|
||||
} else {
|
||||
brokerHost = detail.IP
|
||||
brokerHost = *detail.IP
|
||||
}
|
||||
|
||||
// broker must be reachable and respond within designated time
|
||||
|
||||
121
vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go
generated
vendored
121
vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/check.go
generated
vendored
@@ -10,11 +10,13 @@ import (
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/circonus-labs/circonus-gometrics/api"
|
||||
"github.com/circonus-labs/circonus-gometrics/api/config"
|
||||
)
|
||||
|
||||
// UpdateCheck determines if the check needs to be updated (new metrics, tags, etc.)
|
||||
@@ -35,7 +37,8 @@ func (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric
|
||||
}
|
||||
|
||||
// refresh check bundle (in case there were changes made by other apps or in UI)
|
||||
checkBundle, err := cm.apih.FetchCheckBundleByCID(api.CIDType(cm.checkBundle.CID))
|
||||
cid := cm.checkBundle.CID
|
||||
checkBundle, err := cm.apih.FetchCheckBundle(api.CIDType(&cid))
|
||||
if err != nil {
|
||||
cm.Log.Printf("[ERROR] unable to fetch up-to-date check bundle %v", err)
|
||||
return
|
||||
@@ -44,6 +47,8 @@ func (cm *CheckManager) UpdateCheck(newMetrics map[string]*api.CheckBundleMetric
|
||||
cm.checkBundle = checkBundle
|
||||
cm.cbmu.Unlock()
|
||||
|
||||
// check metric_limit and see if it’s 0, if so, don't even bother to try to update the check.
|
||||
|
||||
cm.addNewMetrics(newMetrics)
|
||||
|
||||
if len(cm.metricTags) > 0 {
|
||||
@@ -114,7 +119,7 @@ func (cm *CheckManager) initializeTrapURL() error {
|
||||
var broker *api.Broker
|
||||
|
||||
if cm.checkSubmissionURL != "" {
|
||||
check, err = cm.apih.FetchCheckBySubmissionURL(cm.checkSubmissionURL)
|
||||
check, err = cm.fetchCheckBySubmissionURL(cm.checkSubmissionURL)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -138,7 +143,8 @@ func (cm *CheckManager) initializeTrapURL() error {
|
||||
check.CID, err)
|
||||
}
|
||||
} else if cm.checkID > 0 {
|
||||
check, err = cm.apih.FetchCheckByID(cm.checkID)
|
||||
cid := fmt.Sprintf("/check/%d", cm.checkID)
|
||||
check, err = cm.apih.FetchCheck(api.CIDType(&cid))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -150,7 +156,7 @@ func (cm *CheckManager) initializeTrapURL() error {
|
||||
// old search (instanceid as check.target)
|
||||
searchCriteria := fmt.Sprintf(
|
||||
"(active:1)(type:\"%s\")(host:\"%s\")(tags:%s)", cm.checkType, cm.checkTarget, strings.Join(cm.checkSearchTag, ","))
|
||||
checkBundle, err = cm.checkBundleSearch(searchCriteria, map[string]string{})
|
||||
checkBundle, err = cm.checkBundleSearch(searchCriteria, map[string][]string{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -160,7 +166,7 @@ func (cm *CheckManager) initializeTrapURL() error {
|
||||
// new search (check.target != instanceid, instanceid encoded in notes field)
|
||||
searchCriteria := fmt.Sprintf(
|
||||
"(active:1)(type:\"%s\")(tags:%s)", cm.checkType, strings.Join(cm.checkSearchTag, ","))
|
||||
filterCriteria := map[string]string{"f_notes": cm.getNotes()}
|
||||
filterCriteria := map[string][]string{"f_notes": []string{*cm.getNotes()}}
|
||||
checkBundle, err = cm.checkBundleSearch(searchCriteria, filterCriteria)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -179,7 +185,8 @@ func (cm *CheckManager) initializeTrapURL() error {
|
||||
|
||||
if checkBundle == nil {
|
||||
if check != nil {
|
||||
checkBundle, err = cm.apih.FetchCheckBundleByCID(api.CIDType(check.CheckBundleCID))
|
||||
cid := check.CheckBundleCID
|
||||
checkBundle, err = cm.apih.FetchCheckBundle(api.CIDType(&cid))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -189,7 +196,8 @@ func (cm *CheckManager) initializeTrapURL() error {
|
||||
}
|
||||
|
||||
if broker == nil {
|
||||
broker, err = cm.apih.FetchBrokerByCID(api.CIDType(checkBundle.Brokers[0]))
|
||||
cid := checkBundle.Brokers[0]
|
||||
broker, err = cm.apih.FetchBroker(api.CIDType(&cid))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -201,7 +209,14 @@ func (cm *CheckManager) initializeTrapURL() error {
|
||||
|
||||
// determine the trap url to which metrics should be PUT
|
||||
if checkBundle.Type == "httptrap" {
|
||||
cm.trapURL = api.URLType(checkBundle.Config.SubmissionURL)
|
||||
if turl, found := checkBundle.Config[config.SubmissionURL]; found {
|
||||
cm.trapURL = api.URLType(turl)
|
||||
} else {
|
||||
if cm.Debug {
|
||||
cm.Log.Printf("Missing config.%s %+v", config.SubmissionURL, checkBundle)
|
||||
}
|
||||
return fmt.Errorf("[ERROR] Unable to use check, no %s in config", config.SubmissionURL)
|
||||
}
|
||||
} else {
|
||||
// build a submission_url for non-httptrap checks out of mtev_reverse url
|
||||
if len(checkBundle.ReverseConnectURLs) == 0 {
|
||||
@@ -210,7 +225,14 @@ func (cm *CheckManager) initializeTrapURL() error {
|
||||
mtevURL := checkBundle.ReverseConnectURLs[0]
|
||||
mtevURL = strings.Replace(mtevURL, "mtev_reverse", "https", 1)
|
||||
mtevURL = strings.Replace(mtevURL, "check", "module/httptrap", 1)
|
||||
cm.trapURL = api.URLType(fmt.Sprintf("%s/%s", mtevURL, checkBundle.Config.ReverseSecret))
|
||||
if rs, found := checkBundle.Config[config.ReverseSecretKey]; found {
|
||||
cm.trapURL = api.URLType(fmt.Sprintf("%s/%s", mtevURL, rs))
|
||||
} else {
|
||||
if cm.Debug {
|
||||
cm.Log.Printf("Missing config.%s %+v", config.ReverseSecretKey, checkBundle)
|
||||
}
|
||||
return fmt.Errorf("[ERROR] Unable to use check, no %s in config", config.ReverseSecretKey)
|
||||
}
|
||||
}
|
||||
|
||||
// used when sending as "ServerName" get around certs not having IP SANS
|
||||
@@ -227,20 +249,21 @@ func (cm *CheckManager) initializeTrapURL() error {
|
||||
}
|
||||
|
||||
// Search for a check bundle given a predetermined set of criteria
|
||||
func (cm *CheckManager) checkBundleSearch(criteria string, filter map[string]string) (*api.CheckBundle, error) {
|
||||
checkBundles, err := cm.apih.CheckBundleFilterSearch(api.SearchQueryType(criteria), filter)
|
||||
func (cm *CheckManager) checkBundleSearch(criteria string, filter map[string][]string) (*api.CheckBundle, error) {
|
||||
search := api.SearchQueryType(criteria)
|
||||
checkBundles, err := cm.apih.SearchCheckBundles(&search, &filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(checkBundles) == 0 {
|
||||
if len(*checkBundles) == 0 {
|
||||
return nil, nil // trigger creation of a new check
|
||||
}
|
||||
|
||||
numActive := 0
|
||||
checkID := -1
|
||||
|
||||
for idx, check := range checkBundles {
|
||||
for idx, check := range *checkBundles {
|
||||
if check.Status == statusActive {
|
||||
numActive++
|
||||
checkID = idx
|
||||
@@ -251,7 +274,9 @@ func (cm *CheckManager) checkBundleSearch(criteria string, filter map[string]str
|
||||
return nil, fmt.Errorf("[ERROR] multiple check bundles match criteria %s", criteria)
|
||||
}
|
||||
|
||||
return &checkBundles[checkID], nil
|
||||
bundle := (*checkBundles)[checkID]
|
||||
|
||||
return &bundle, nil
|
||||
}
|
||||
|
||||
// Create a new check to receive metrics
|
||||
@@ -272,10 +297,13 @@ func (cm *CheckManager) createNewCheck() (*api.CheckBundle, *api.Broker, error)
|
||||
|
||||
config := &api.CheckBundle{
|
||||
Brokers: []string{broker.CID},
|
||||
Config: api.CheckBundleConfig{AsyncMetrics: true, Secret: checkSecret},
|
||||
Config: map[config.Key]string{
|
||||
config.AsyncMetrics: "true",
|
||||
config.Secret: checkSecret,
|
||||
},
|
||||
DisplayName: string(cm.checkDisplayName),
|
||||
Metrics: []api.CheckBundleMetric{},
|
||||
MetricLimit: 0,
|
||||
MetricLimit: config.DefaultCheckBundleMetricLimit,
|
||||
Notes: cm.getNotes(),
|
||||
Period: 60,
|
||||
Status: statusActive,
|
||||
@@ -304,6 +332,63 @@ func (cm *CheckManager) makeSecret() (string, error) {
|
||||
return hex.EncodeToString(hash.Sum(nil))[0:16], nil
|
||||
}
|
||||
|
||||
func (cm *CheckManager) getNotes() string {
|
||||
return fmt.Sprintf("cgm_instanceid|%s", cm.checkInstanceID)
|
||||
func (cm *CheckManager) getNotes() *string {
|
||||
notes := fmt.Sprintf("cgm_instanceid|%s", cm.checkInstanceID)
|
||||
return ¬es
|
||||
}
|
||||
|
||||
// FetchCheckBySubmissionURL fetch a check configuration by submission_url
|
||||
func (cm *CheckManager) fetchCheckBySubmissionURL(submissionURL api.URLType) (*api.Check, error) {
|
||||
if string(submissionURL) == "" {
|
||||
return nil, errors.New("[ERROR] Invalid submission URL (blank)")
|
||||
}
|
||||
|
||||
u, err := url.Parse(string(submissionURL))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// valid trap url: scheme://host[:port]/module/httptrap/UUID/secret
|
||||
|
||||
// does it smell like a valid trap url path
|
||||
if !strings.Contains(u.Path, "/module/httptrap/") {
|
||||
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', unrecognized path", submissionURL)
|
||||
}
|
||||
|
||||
// extract uuid
|
||||
pathParts := strings.Split(strings.Replace(u.Path, "/module/httptrap/", "", 1), "/")
|
||||
if len(pathParts) != 2 {
|
||||
return nil, fmt.Errorf("[ERROR] Invalid submission URL '%s', UUID not where expected", submissionURL)
|
||||
}
|
||||
uuid := pathParts[0]
|
||||
|
||||
filter := api.SearchFilterType{"f__check_uuid": []string{uuid}}
|
||||
|
||||
checks, err := cm.apih.SearchChecks(nil, &filter)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(*checks) == 0 {
|
||||
return nil, fmt.Errorf("[ERROR] No checks found with UUID %s", uuid)
|
||||
}
|
||||
|
||||
numActive := 0
|
||||
checkID := -1
|
||||
|
||||
for idx, check := range *checks {
|
||||
if check.Active {
|
||||
numActive++
|
||||
checkID = idx
|
||||
}
|
||||
}
|
||||
|
||||
if numActive > 1 {
|
||||
return nil, fmt.Errorf("[ERROR] Multiple checks with same UUID %s", uuid)
|
||||
}
|
||||
|
||||
check := (*checks)[checkID]
|
||||
|
||||
return &check, nil
|
||||
|
||||
}
|
||||
|
||||
7
vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go
generated
vendored
7
vendor/github.com/circonus-labs/circonus-gometrics/checkmgr/checkmgr.go
generated
vendored
@@ -182,6 +182,11 @@ type Trap struct {
|
||||
|
||||
// NewCheckManager returns a new check manager
|
||||
func NewCheckManager(cfg *Config) (*CheckManager, error) {
|
||||
return New(cfg)
|
||||
}
|
||||
|
||||
// New returns a new check manager
|
||||
func New(cfg *Config) (*CheckManager, error) {
|
||||
|
||||
if cfg == nil {
|
||||
return nil, errors.New("invalid Check Manager configuration (nil)")
|
||||
@@ -223,7 +228,7 @@ func NewCheckManager(cfg *Config) (*CheckManager, error) {
|
||||
cfg.API.Debug = cm.Debug
|
||||
cfg.API.Log = cm.Log
|
||||
|
||||
apih, err := api.NewAPI(&cfg.API)
|
||||
apih, err := api.New(&cfg.API)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
7
vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go
generated
vendored
7
vendor/github.com/circonus-labs/circonus-gometrics/circonus-gometrics.go
generated
vendored
@@ -99,6 +99,11 @@ type CirconusMetrics struct {
|
||||
|
||||
// NewCirconusMetrics returns a CirconusMetrics instance
|
||||
func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
|
||||
return New(cfg)
|
||||
}
|
||||
|
||||
// New returns a CirconusMetrics instance
|
||||
func New(cfg *Config) (*CirconusMetrics, error) {
|
||||
|
||||
if cfg == nil {
|
||||
return nil, errors.New("invalid configuration (nil)")
|
||||
@@ -184,7 +189,7 @@ func NewCirconusMetrics(cfg *Config) (*CirconusMetrics, error) {
|
||||
cfg.CheckManager.Debug = cm.Debug
|
||||
cfg.CheckManager.Log = cm.Log
|
||||
|
||||
check, err := checkmgr.NewCheckManager(&cfg.CheckManager)
|
||||
check, err := checkmgr.New(&cfg.CheckManager)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
2
vendor/github.com/circonus-labs/circonus-gometrics/submit.go
generated
vendored
2
vendor/github.com/circonus-labs/circonus-gometrics/submit.go
generated
vendored
@@ -27,7 +27,7 @@ func (m *CirconusMetrics) submit(output map[string]interface{}, newMetrics map[s
|
||||
|
||||
str, err := json.Marshal(output)
|
||||
if err != nil {
|
||||
m.Log.Printf("[ERROR] marshling output %+v", err)
|
||||
m.Log.Printf("[ERROR] marshaling output %+v", err)
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
50
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
50
vendor/github.com/coreos/etcd/clientv3/client.go
generated
vendored
@@ -21,6 +21,7 @@ import (
|
||||
"net"
|
||||
"net/url"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
|
||||
@@ -51,6 +52,7 @@ type Client struct {
|
||||
creds *credentials.TransportCredentials
|
||||
balancer *simpleBalancer
|
||||
retryWrapper retryRpcFunc
|
||||
retryAuthWrapper retryRpcFunc
|
||||
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
@@ -59,6 +61,8 @@ type Client struct {
|
||||
Username string
|
||||
// Password is a password for authentication
|
||||
Password string
|
||||
// tokenCred is an instance of WithPerRPCCredentials()'s argument
|
||||
tokenCred *authTokenCredential
|
||||
}
|
||||
|
||||
// New creates a new etcdv3 client from a given configuration.
|
||||
@@ -145,6 +149,7 @@ func (c *Client) autoSync() {
|
||||
|
||||
type authTokenCredential struct {
|
||||
token string
|
||||
tokenMu *sync.RWMutex
|
||||
}
|
||||
|
||||
func (cred authTokenCredential) RequireTransportSecurity() bool {
|
||||
@@ -152,6 +157,8 @@ func (cred authTokenCredential) RequireTransportSecurity() bool {
|
||||
}
|
||||
|
||||
func (cred authTokenCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
|
||||
cred.tokenMu.RLock()
|
||||
defer cred.tokenMu.RUnlock()
|
||||
return map[string]string{
|
||||
"token": cred.token,
|
||||
}, nil
|
||||
@@ -236,22 +243,50 @@ func (c *Client) Dial(endpoint string) (*grpc.ClientConn, error) {
|
||||
return c.dial(endpoint)
|
||||
}
|
||||
|
||||
func (c *Client) getToken(ctx context.Context) error {
|
||||
var err error // return last error in a case of fail
|
||||
var auth *authenticator
|
||||
|
||||
for i := 0; i < len(c.cfg.Endpoints); i++ {
|
||||
endpoint := c.cfg.Endpoints[i]
|
||||
host := getHost(endpoint)
|
||||
// use dial options without dopts to avoid reusing the client balancer
|
||||
auth, err = newAuthenticator(host, c.dialSetupOpts(endpoint))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
defer auth.close()
|
||||
|
||||
var resp *AuthenticateResponse
|
||||
resp, err = auth.authenticate(ctx, c.Username, c.Password)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
c.tokenCred.tokenMu.Lock()
|
||||
c.tokenCred.token = resp.Token
|
||||
c.tokenCred.tokenMu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *Client) dial(endpoint string, dopts ...grpc.DialOption) (*grpc.ClientConn, error) {
|
||||
opts := c.dialSetupOpts(endpoint, dopts...)
|
||||
host := getHost(endpoint)
|
||||
if c.Username != "" && c.Password != "" {
|
||||
// use dial options without dopts to avoid reusing the client balancer
|
||||
auth, err := newAuthenticator(host, c.dialSetupOpts(endpoint))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
c.tokenCred = &authTokenCredential{
|
||||
tokenMu: &sync.RWMutex{},
|
||||
}
|
||||
defer auth.close()
|
||||
|
||||
resp, err := auth.authenticate(c.ctx, c.Username, c.Password)
|
||||
err := c.getToken(context.TODO())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(authTokenCredential{token: resp.Token}))
|
||||
|
||||
opts = append(opts, grpc.WithPerRPCCredentials(c.tokenCred))
|
||||
}
|
||||
|
||||
// add metrics options
|
||||
@@ -303,6 +338,7 @@ func newClient(cfg *Config) (*Client, error) {
|
||||
}
|
||||
client.conn = conn
|
||||
client.retryWrapper = client.newRetryWrapper()
|
||||
client.retryAuthWrapper = client.newAuthRetryWrapper()
|
||||
|
||||
// wait for a connection
|
||||
if cfg.DialTimeout > 0 {
|
||||
|
||||
65
vendor/github.com/coreos/etcd/clientv3/retry.go
generated
vendored
65
vendor/github.com/coreos/etcd/clientv3/retry.go
generated
vendored
@@ -33,13 +33,14 @@ func (c *Client) newRetryWrapper() retryRpcFunc {
|
||||
return nil
|
||||
}
|
||||
|
||||
// only retry if unavailable
|
||||
if grpc.Code(err) != codes.Unavailable {
|
||||
eErr := rpctypes.Error(err)
|
||||
// always stop retry on etcd errors
|
||||
if _, ok := eErr.(rpctypes.EtcdError); ok {
|
||||
return err
|
||||
}
|
||||
// always stop retry on etcd errors
|
||||
eErr := rpctypes.Error(err)
|
||||
if _, ok := eErr.(rpctypes.EtcdError); ok {
|
||||
|
||||
// only retry if unavailable
|
||||
if grpc.Code(err) != codes.Unavailable {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -54,17 +55,52 @@ func (c *Client) newRetryWrapper() retryRpcFunc {
|
||||
}
|
||||
}
|
||||
|
||||
type retryKVClient struct {
|
||||
pb.KVClient
|
||||
retryf retryRpcFunc
|
||||
func (c *Client) newAuthRetryWrapper() retryRpcFunc {
|
||||
return func(rpcCtx context.Context, f rpcFunc) error {
|
||||
for {
|
||||
err := f(rpcCtx)
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
// always stop retry on etcd errors other than invalid auth token
|
||||
if rpctypes.Error(err) == rpctypes.ErrInvalidAuthToken {
|
||||
gterr := c.getToken(rpcCtx)
|
||||
if gterr != nil {
|
||||
return err // return the original error for simplicity
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RetryKVClient implements a KVClient that uses the client's FailFast retry policy.
|
||||
func RetryKVClient(c *Client) pb.KVClient {
|
||||
return &retryKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
|
||||
retryWrite := &retryWriteKVClient{pb.NewKVClient(c.conn), c.retryWrapper}
|
||||
return &retryKVClient{&retryWriteKVClient{retryWrite, c.retryAuthWrapper}}
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||
type retryKVClient struct {
|
||||
*retryWriteKVClient
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Range(ctx context.Context, in *pb.RangeRequest, opts ...grpc.CallOption) (resp *pb.RangeResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.retryWriteKVClient.Range(rctx, in, opts...)
|
||||
return err
|
||||
})
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type retryWriteKVClient struct {
|
||||
pb.KVClient
|
||||
retryf retryRpcFunc
|
||||
}
|
||||
|
||||
func (rkv *retryWriteKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...grpc.CallOption) (resp *pb.PutResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.Put(rctx, in, opts...)
|
||||
return err
|
||||
@@ -72,7 +108,7 @@ func (rkv *retryKVClient) Put(ctx context.Context, in *pb.PutRequest, opts ...gr
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||
func (rkv *retryWriteKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeRequest, opts ...grpc.CallOption) (resp *pb.DeleteRangeResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.DeleteRange(rctx, in, opts...)
|
||||
return err
|
||||
@@ -80,7 +116,7 @@ func (rkv *retryKVClient) DeleteRange(ctx context.Context, in *pb.DeleteRangeReq
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||
func (rkv *retryWriteKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...grpc.CallOption) (resp *pb.TxnResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.Txn(rctx, in, opts...)
|
||||
return err
|
||||
@@ -88,7 +124,7 @@ func (rkv *retryKVClient) Txn(ctx context.Context, in *pb.TxnRequest, opts ...gr
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (rkv *retryKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||
func (rkv *retryWriteKVClient) Compact(ctx context.Context, in *pb.CompactionRequest, opts ...grpc.CallOption) (resp *pb.CompactionResponse, err error) {
|
||||
err = rkv.retryf(ctx, func(rctx context.Context) error {
|
||||
resp, err = rkv.KVClient.Compact(rctx, in, opts...)
|
||||
return err
|
||||
@@ -103,7 +139,8 @@ type retryLeaseClient struct {
|
||||
|
||||
// RetryLeaseClient implements a LeaseClient that uses the client's FailFast retry policy.
|
||||
func RetryLeaseClient(c *Client) pb.LeaseClient {
|
||||
return &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
|
||||
retry := &retryLeaseClient{pb.NewLeaseClient(c.conn), c.retryWrapper}
|
||||
return &retryLeaseClient{retry, c.retryAuthWrapper}
|
||||
}
|
||||
|
||||
func (rlc *retryLeaseClient) LeaseGrant(ctx context.Context, in *pb.LeaseGrantRequest, opts ...grpc.CallOption) (resp *pb.LeaseGrantResponse, err error) {
|
||||
|
||||
3
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
generated
vendored
3
vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
generated
vendored
@@ -52,6 +52,7 @@ var (
|
||||
ErrGRPCRoleNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: role is not granted to the user")
|
||||
ErrGRPCPermissionNotGranted = grpc.Errorf(codes.FailedPrecondition, "etcdserver: permission is not granted to the role")
|
||||
ErrGRPCAuthNotEnabled = grpc.Errorf(codes.FailedPrecondition, "etcdserver: authentication is not enabled")
|
||||
ErrGRPCInvalidAuthToken = grpc.Errorf(codes.Unauthenticated, "etcdserver: invalid auth token")
|
||||
|
||||
ErrGRPCNoLeader = grpc.Errorf(codes.Unavailable, "etcdserver: no leader")
|
||||
ErrGRPCNotCapable = grpc.Errorf(codes.Unavailable, "etcdserver: not capable")
|
||||
@@ -93,6 +94,7 @@ var (
|
||||
grpc.ErrorDesc(ErrGRPCRoleNotGranted): ErrGRPCRoleNotGranted,
|
||||
grpc.ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
|
||||
grpc.ErrorDesc(ErrGRPCAuthNotEnabled): ErrGRPCAuthNotEnabled,
|
||||
grpc.ErrorDesc(ErrGRPCInvalidAuthToken): ErrGRPCInvalidAuthToken,
|
||||
|
||||
grpc.ErrorDesc(ErrGRPCNoLeader): ErrGRPCNoLeader,
|
||||
grpc.ErrorDesc(ErrGRPCNotCapable): ErrGRPCNotCapable,
|
||||
@@ -135,6 +137,7 @@ var (
|
||||
ErrRoleNotGranted = Error(ErrGRPCRoleNotGranted)
|
||||
ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
|
||||
ErrAuthNotEnabled = Error(ErrGRPCAuthNotEnabled)
|
||||
ErrInvalidAuthToken = Error(ErrGRPCInvalidAuthToken)
|
||||
|
||||
ErrNoLeader = Error(ErrGRPCNoLeader)
|
||||
ErrNotCapable = Error(ErrGRPCNotCapable)
|
||||
|
||||
2
vendor/github.com/coreos/etcd/pkg/transport/listener.go
generated
vendored
2
vendor/github.com/coreos/etcd/pkg/transport/listener.go
generated
vendored
@@ -23,6 +23,7 @@ import (
|
||||
"crypto/x509/pkix"
|
||||
"encoding/pem"
|
||||
"fmt"
|
||||
"log"
|
||||
"math/big"
|
||||
"net"
|
||||
"os"
|
||||
@@ -235,6 +236,7 @@ func (info TLSInfo) ClientConfig() (*tls.Config, error) {
|
||||
return nil, err
|
||||
}
|
||||
// if given a CA, trust any host with a cert signed by the CA
|
||||
log.Println("warning: ignoring ServerName for user-provided CA for backwards compatibility is deprecated")
|
||||
cfg.ServerName = ""
|
||||
}
|
||||
|
||||
|
||||
86
vendor/github.com/denisenkom/go-mssqldb/README.md
generated
vendored
86
vendor/github.com/denisenkom/go-mssqldb/README.md
generated
vendored
@@ -4,16 +4,7 @@
|
||||
|
||||
go get github.com/denisenkom/go-mssqldb
|
||||
|
||||
## Tests
|
||||
|
||||
`go test` is used for testing. A running instance of MSSQL server is required.
|
||||
Environment variables are used to pass login information.
|
||||
|
||||
Example:
|
||||
|
||||
env HOST=localhost SQLUSER=sa SQLPASSWORD=sa DATABASE=test go test
|
||||
|
||||
## Connection Parameters
|
||||
## Connection Parameters and DSN
|
||||
|
||||
* "server" - host or host\instance (default localhost)
|
||||
* "port" - used only when there is no instance in server (default 1433)
|
||||
@@ -46,15 +37,70 @@ Example:
|
||||
* "app name" - The application name (default is go-mssqldb)
|
||||
* "ApplicationIntent" - Can be given the value "ReadOnly" to initiate a read-only connection to an Availability Group listener.
|
||||
|
||||
Example:
|
||||
The connection string can be specified in one of three formats:
|
||||
|
||||
```go
|
||||
db, err := sql.Open("mssql", "server=localhost;user id=sa")
|
||||
```
|
||||
1. ADO: `key=value` pairs separated by `;`. Values may not contain `;`, leading and trailing whitespace is ignored.
|
||||
Examples:
|
||||
|
||||
* `server=localhost\\SQLExpress;user id=sa;database=master;connection timeout=30`
|
||||
* `server=localhost;user id=sa;database=master;connection timeout=30`
|
||||
|
||||
2. ODBC: Prefix with `odbc`, `key=value` pairs separated by `;`. Allow `;` by wrapping
|
||||
values in `{}`. Examples:
|
||||
|
||||
* `odbc:server=localhost\\SQLExpress;user id=sa;database=master;connection timeout=30`
|
||||
* `odbc:server=localhost;user id=sa;database=master;connection timeout=30`
|
||||
* `odbc:server=localhost;user id=sa;password={foo;bar}` // Value marked with `{}`, password is "foo;bar"
|
||||
* `odbc:server=localhost;user id=sa;password={foo{bar}` // Value marked with `{}`, password is "foo{bar"
|
||||
* `odbc:server=localhost;user id=sa;password={foobar }` // Value marked with `{}`, password is "foobar "
|
||||
* `odbc:server=localhost;user id=sa;password=foo{bar` // Literal `{`, password is "foo{bar"
|
||||
* `odbc:server=localhost;user id=sa;password=foo}bar` // Literal `}`, password is "foo}bar"
|
||||
* `odbc:server=localhost;user id=sa;password={foo{bar}` // Literal `{`, password is "foo{bar"
|
||||
* `odbc:server=localhost;user id=sa;password={foo}}bar}` // Escaped `} with `}}`, password is "foo}bar"
|
||||
|
||||
3. URL: with `sqlserver` scheme. username and password appears before the host. Any instance appears as
|
||||
the first segment in the path. All other options are query parameters. Examples:
|
||||
|
||||
* `sqlserver://username:password@host/instance?param1=value¶m2=value`
|
||||
* `sqlserver://username:password@host:port?param1=value¶m2=value`
|
||||
* `sqlserver://sa@localhost/SQLExpress?database=master&connection+timeout=30` // `SQLExpress instance.
|
||||
* `sqlserver://sa:mypass@localhost?database=master&connection+timeout=30` // username=sa, password=mypass.
|
||||
* `sqlserver://sa:mypass@localhost:1234?database=master&connection+timeout=30"` // port 1234 on localhost.
|
||||
* `sqlserver://sa:my%7Bpass@somehost?connection+timeout=30` // password is "my{pass"
|
||||
|
||||
A string of this format can be constructed using the `URL` type in the `net/url` package.
|
||||
|
||||
```go
|
||||
query := url.Values{}
|
||||
query.Add("connection timeout", fmt.Sprintf("%d", connectionTimeout))
|
||||
|
||||
u := &url.URL{
|
||||
Scheme: "sqlserver",
|
||||
User: url.UserPassword(username, password),
|
||||
Host: fmt.Sprintf("%s:%d", hostname, port),
|
||||
// Path: instance, // if connecting to an instance instead of a port
|
||||
RawQuery: query.Encode(),
|
||||
}
|
||||
|
||||
connectionString := u.String()
|
||||
|
||||
db, err := sql.Open("sqlserver", connectionString)
|
||||
// or
|
||||
db, err := sql.Open("mssql", connectionString)
|
||||
```
|
||||
|
||||
## Statement Parameters
|
||||
|
||||
In the SQL statement text, literals may be replaced by a parameter that matches one of the following:
|
||||
The `sqlserver` driver uses normal MS SQL Server syntax and expects parameters in
|
||||
the sql query to be in the form of either `@Name` or `@p1` to `@pN` (ordinal position).
|
||||
|
||||
```go
|
||||
db.QueryContext(ctx, `select * from t where ID = @ID;`, sql.Named("ID", 6))
|
||||
```
|
||||
|
||||
|
||||
For the `mssql` driver, the SQL statement text will be processed and literals will
|
||||
be replaced by a parameter that matches one of the following:
|
||||
|
||||
* ?
|
||||
* ?nnn
|
||||
@@ -73,7 +119,6 @@ will expand to roughly
|
||||
SELECT * FROM t WHERE a = 'z', b = 'y', c = 'x'
|
||||
```
|
||||
|
||||
|
||||
## Features
|
||||
|
||||
* Can be used with SQL Server 2005 or newer
|
||||
@@ -87,6 +132,15 @@ SELECT * FROM t WHERE a = 'z', b = 'y', c = 'x'
|
||||
* Supports connections to AlwaysOn Availability Group listeners, including re-direction to read-only replicas.
|
||||
* Supports query notifications
|
||||
|
||||
## Tests
|
||||
|
||||
`go test` is used for testing. A running instance of MSSQL server is required.
|
||||
Environment variables are used to pass login information.
|
||||
|
||||
Example:
|
||||
|
||||
env HOST=localhost SQLUSER=sa SQLPASSWORD=sa DATABASE=test go test
|
||||
|
||||
## Known Issues
|
||||
|
||||
* SQL Server 2008 and 2008 R2 engine cannot handle login records when SSL encryption is not disabled.
|
||||
|
||||
12
vendor/github.com/denisenkom/go-mssqldb/doc.go
generated
vendored
Normal file
12
vendor/github.com/denisenkom/go-mssqldb/doc.go
generated
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
// package mssql implements the TDS protocol used to connect to MS SQL Server (sqlserver)
|
||||
// database servers.
|
||||
//
|
||||
// This package registers two drivers:
|
||||
// sqlserver: uses native "@" parameter placeholder names and does no pre-processing.
|
||||
// mssql: expects identifiers to be prefixed with ":" and pre-processes queries.
|
||||
//
|
||||
// If the ordinal position is used for query parameters, identifiers will be named
|
||||
// "@p1", "@p2", ... "@pN".
|
||||
//
|
||||
// Please refer to the REAME for the format of the DSN.
|
||||
package mssql
|
||||
50
vendor/github.com/denisenkom/go-mssqldb/mssql.go
generated
vendored
50
vendor/github.com/denisenkom/go-mssqldb/mssql.go
generated
vendored
@@ -8,24 +8,45 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
"net"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
"reflect"
|
||||
"golang.org/x/net/context" // use the "x/net/context" for backwards compatibility.
|
||||
)
|
||||
|
||||
var driverInstance = &MssqlDriver{}
|
||||
var driverInstance = &MssqlDriver{processQueryText: true}
|
||||
var driverInstanceNoProcess = &MssqlDriver{processQueryText: false}
|
||||
|
||||
func init() {
|
||||
sql.Register("mssql", driverInstance)
|
||||
sql.Register("sqlserver", driverInstanceNoProcess)
|
||||
}
|
||||
|
||||
// Abstract the dialer for testing and for non-TCP based connections.
|
||||
type dialer interface {
|
||||
Dial(addr string) (net.Conn, error)
|
||||
}
|
||||
|
||||
var createDialer func(p *connectParams) dialer
|
||||
|
||||
type tcpDialer struct {
|
||||
nd *net.Dialer
|
||||
}
|
||||
|
||||
func (d tcpDialer) Dial(addr string) (net.Conn, error) {
|
||||
return d.nd.Dial("tcp", addr)
|
||||
}
|
||||
|
||||
type MssqlDriver struct {
|
||||
log optionalLogger
|
||||
|
||||
processQueryText bool
|
||||
}
|
||||
|
||||
func SetLogger(logger Logger) {
|
||||
driverInstance.SetLogger(logger)
|
||||
driverInstanceNoProcess.SetLogger(logger)
|
||||
}
|
||||
|
||||
func (d *MssqlDriver) SetLogger(logger Logger) {
|
||||
@@ -35,6 +56,8 @@ func (d *MssqlDriver) SetLogger(logger Logger) {
|
||||
type MssqlConn struct {
|
||||
sess *tdsSession
|
||||
transactionCtx context.Context
|
||||
|
||||
processQueryText bool
|
||||
}
|
||||
|
||||
func (c *MssqlConn) simpleProcessResp(ctx context.Context) error {
|
||||
@@ -141,7 +164,7 @@ func (d *MssqlDriver) open(dsn string) (*MssqlConn, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
sess, err := connect(params)
|
||||
sess, err := connect(d.log, params)
|
||||
if err != nil {
|
||||
// main server failed, try fail-over partner
|
||||
if params.failOverPartner == "" {
|
||||
@@ -153,14 +176,14 @@ func (d *MssqlDriver) open(dsn string) (*MssqlConn, error) {
|
||||
params.port = params.failOverPort
|
||||
}
|
||||
|
||||
sess, err = connect(params)
|
||||
sess, err = connect(d.log, params)
|
||||
if err != nil {
|
||||
// fail-over partner also failed, now fail
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
conn := &MssqlConn{sess, context.Background()}
|
||||
conn := &MssqlConn{sess, context.Background(), d.processQueryText}
|
||||
conn.sess.log = d.log
|
||||
return conn, nil
|
||||
}
|
||||
@@ -187,8 +210,11 @@ func (c *MssqlConn) Prepare(query string) (driver.Stmt, error) {
|
||||
}
|
||||
|
||||
func (c *MssqlConn) prepareContext(ctx context.Context, query string) (*MssqlStmt, error) {
|
||||
q, paramCount := parseParams(query)
|
||||
return &MssqlStmt{c, q, paramCount, nil}, nil
|
||||
paramCount := -1
|
||||
if c.processQueryText {
|
||||
query, paramCount = parseParams(query)
|
||||
}
|
||||
return &MssqlStmt{c, query, paramCount, nil}, nil
|
||||
}
|
||||
|
||||
func (s *MssqlStmt) Close() error {
|
||||
@@ -274,7 +300,7 @@ func convertOldArgs(args []driver.Value) []namedValue {
|
||||
list := make([]namedValue, len(args))
|
||||
for i, v := range args {
|
||||
list[i] = namedValue{
|
||||
Ordinal: i+1,
|
||||
Ordinal: i + 1,
|
||||
Value: v,
|
||||
}
|
||||
}
|
||||
@@ -341,11 +367,11 @@ func (s *MssqlStmt) processExec(ctx context.Context) (res driver.Result, err err
|
||||
switch token := token.(type) {
|
||||
case doneInProcStruct:
|
||||
if token.Status&doneCount != 0 {
|
||||
rowCount = int64(token.RowCount)
|
||||
rowCount += int64(token.RowCount)
|
||||
}
|
||||
case doneStruct:
|
||||
if token.Status&doneCount != 0 {
|
||||
rowCount = int64(token.RowCount)
|
||||
rowCount += int64(token.RowCount)
|
||||
}
|
||||
if token.isError() {
|
||||
return nil, token.getError()
|
||||
@@ -380,7 +406,7 @@ func (rc *MssqlRows) Columns() (res []string) {
|
||||
return
|
||||
}
|
||||
|
||||
func (rc *MssqlRows) Next(dest []driver.Value) (error) {
|
||||
func (rc *MssqlRows) Next(dest []driver.Value) error {
|
||||
if rc.nextCols != nil {
|
||||
return io.EOF
|
||||
}
|
||||
@@ -464,7 +490,7 @@ func (r *MssqlRows) ColumnTypePrecisionScale(index int) (int64, int64, bool) {
|
||||
// to be not nullable.
|
||||
// If the column nullability is unknown, ok should be false.
|
||||
func (r *MssqlRows) ColumnTypeNullable(index int) (nullable, ok bool) {
|
||||
nullable = r.cols[index].Flags & colFlagNullable != 0
|
||||
nullable = r.cols[index].Flags&colFlagNullable != 0
|
||||
ok = true
|
||||
return
|
||||
}
|
||||
|
||||
6
vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go
generated
vendored
6
vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3.go
generated
vendored
@@ -6,6 +6,8 @@ import (
|
||||
"net"
|
||||
)
|
||||
|
||||
func createDialer(p connectParams) *net.Dialer {
|
||||
return &net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}
|
||||
func init() {
|
||||
createDialer = func(p *connectParams) dialer {
|
||||
return tcpDialer{&net.Dialer{Timeout: p.dial_timeout, KeepAlive: p.keepAlive}}
|
||||
}
|
||||
}
|
||||
|
||||
6
vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go
generated
vendored
6
vendor/github.com/denisenkom/go-mssqldb/mssql_go1.3pre.go
generated
vendored
@@ -6,6 +6,8 @@ import (
|
||||
"net"
|
||||
)
|
||||
|
||||
func createDialer(p *connectParams) *net.Dialer {
|
||||
return &net.Dialer{Timeout: p.dial_timeout}
|
||||
func init() {
|
||||
createDialer = func(p *connectParams) dialer {
|
||||
return tcpDialer{&net.Dialer{Timeout: p.dial_timeout}}
|
||||
}
|
||||
}
|
||||
|
||||
249
vendor/github.com/denisenkom/go-mssqldb/tds.go
generated
vendored
249
vendor/github.com/denisenkom/go-mssqldb/tds.go
generated
vendored
@@ -9,11 +9,13 @@ import (
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unicode"
|
||||
"unicode/utf16"
|
||||
"unicode/utf8"
|
||||
"golang.org/x/net/context" // use the "x/net/context" for backwards compatibility.
|
||||
@@ -641,7 +643,7 @@ func sendSqlBatch72(buf *tdsBuffer,
|
||||
|
||||
// 2.2.1.7 Attention: https://msdn.microsoft.com/en-us/library/dd341449.aspx
|
||||
// 4.19.2 Out-of-Band Attention Signal: https://msdn.microsoft.com/en-us/library/dd305167.aspx
|
||||
func sendAttention(buf *tdsBuffer) (error) {
|
||||
func sendAttention(buf *tdsBuffer) error {
|
||||
buf.BeginPacket(packAttention)
|
||||
return buf.FinishPacket()
|
||||
}
|
||||
@@ -691,9 +693,241 @@ func splitConnectionString(dsn string) (res map[string]string) {
|
||||
return res
|
||||
}
|
||||
|
||||
// Splits a URL in the ODBC format
|
||||
func splitConnectionStringOdbc(dsn string) (map[string]string, error) {
|
||||
res := map[string]string{}
|
||||
|
||||
type parserState int
|
||||
const (
|
||||
// Before the start of a key
|
||||
parserStateBeforeKey parserState = iota
|
||||
|
||||
// Inside a key
|
||||
parserStateKey
|
||||
|
||||
// Beginning of a value. May be bare or braced
|
||||
parserStateBeginValue
|
||||
|
||||
// Inside a bare value
|
||||
parserStateBareValue
|
||||
|
||||
// Inside a braced value
|
||||
parserStateBracedValue
|
||||
|
||||
// A closing brace inside a braced value.
|
||||
// May be the end of the value or an escaped closing brace, depending on the next character
|
||||
parserStateBracedValueClosingBrace
|
||||
|
||||
// After a value. Next character should be a semi-colon or whitespace.
|
||||
parserStateEndValue
|
||||
)
|
||||
|
||||
var state = parserStateBeforeKey
|
||||
|
||||
var key string
|
||||
var value string
|
||||
|
||||
for i, c := range dsn {
|
||||
switch state {
|
||||
case parserStateBeforeKey:
|
||||
switch {
|
||||
case c == '=':
|
||||
return res, fmt.Errorf("Unexpected character = at index %d. Expected start of key or semi-colon or whitespace.", i)
|
||||
case !unicode.IsSpace(c) && c != ';':
|
||||
state = parserStateKey
|
||||
key += string(c)
|
||||
}
|
||||
|
||||
case parserStateKey:
|
||||
switch c {
|
||||
case '=':
|
||||
key = normalizeOdbcKey(key)
|
||||
if len(key) == 0 {
|
||||
return res, fmt.Errorf("Unexpected end of key at index %d.", i)
|
||||
}
|
||||
|
||||
state = parserStateBeginValue
|
||||
|
||||
case ';':
|
||||
// Key without value
|
||||
key = normalizeOdbcKey(key)
|
||||
if len(key) == 0 {
|
||||
return res, fmt.Errorf("Unexpected end of key at index %d.", i)
|
||||
}
|
||||
|
||||
res[key] = value
|
||||
key = ""
|
||||
value = ""
|
||||
state = parserStateBeforeKey
|
||||
|
||||
default:
|
||||
key += string(c)
|
||||
}
|
||||
|
||||
case parserStateBeginValue:
|
||||
switch {
|
||||
case c == '{':
|
||||
state = parserStateBracedValue
|
||||
case c == ';':
|
||||
// Empty value
|
||||
res[key] = value
|
||||
key = ""
|
||||
state = parserStateBeforeKey
|
||||
case unicode.IsSpace(c):
|
||||
// Ignore whitespace
|
||||
default:
|
||||
state = parserStateBareValue
|
||||
value += string(c)
|
||||
}
|
||||
|
||||
case parserStateBareValue:
|
||||
if c == ';' {
|
||||
res[key] = strings.TrimRightFunc(value, unicode.IsSpace)
|
||||
key = ""
|
||||
value = ""
|
||||
state = parserStateBeforeKey
|
||||
} else {
|
||||
value += string(c)
|
||||
}
|
||||
|
||||
case parserStateBracedValue:
|
||||
if c == '}' {
|
||||
state = parserStateBracedValueClosingBrace
|
||||
} else {
|
||||
value += string(c)
|
||||
}
|
||||
|
||||
case parserStateBracedValueClosingBrace:
|
||||
if c == '}' {
|
||||
// Escaped closing brace
|
||||
value += string(c)
|
||||
state = parserStateBracedValue
|
||||
continue
|
||||
}
|
||||
|
||||
// End of braced value
|
||||
res[key] = value
|
||||
key = ""
|
||||
value = ""
|
||||
|
||||
// This character is the first character past the end,
|
||||
// so it needs to be parsed like the parserStateEndValue state.
|
||||
state = parserStateEndValue
|
||||
switch {
|
||||
case c == ';':
|
||||
state = parserStateBeforeKey
|
||||
case unicode.IsSpace(c):
|
||||
// Ignore whitespace
|
||||
default:
|
||||
return res, fmt.Errorf("Unexpected character %c at index %d. Expected semi-colon or whitespace.", c, i)
|
||||
}
|
||||
|
||||
case parserStateEndValue:
|
||||
switch {
|
||||
case c == ';':
|
||||
state = parserStateBeforeKey
|
||||
case unicode.IsSpace(c):
|
||||
// Ignore whitespace
|
||||
default:
|
||||
return res, fmt.Errorf("Unexpected character %c at index %d. Expected semi-colon or whitespace.", c, i)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch state {
|
||||
case parserStateBeforeKey: // Okay
|
||||
case parserStateKey: // Unfinished key. Treat as key without value.
|
||||
key = normalizeOdbcKey(key)
|
||||
if len(key) == 0 {
|
||||
return res, fmt.Errorf("Unexpected end of key at index %d.", len(dsn))
|
||||
}
|
||||
res[key] = value
|
||||
case parserStateBeginValue: // Empty value
|
||||
res[key] = value
|
||||
case parserStateBareValue:
|
||||
res[key] = strings.TrimRightFunc(value, unicode.IsSpace)
|
||||
case parserStateBracedValue:
|
||||
return res, fmt.Errorf("Unexpected end of braced value at index %d.", len(dsn))
|
||||
case parserStateBracedValueClosingBrace: // End of braced value
|
||||
res[key] = value
|
||||
case parserStateEndValue: // Okay
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
// Normalizes the given string as an ODBC-format key
|
||||
func normalizeOdbcKey(s string) string {
|
||||
return strings.ToLower(strings.TrimRightFunc(s, unicode.IsSpace))
|
||||
}
|
||||
|
||||
// Splits a URL of the form sqlserver://username:password@host/instance?param1=value¶m2=value
|
||||
func splitConnectionStringURL(dsn string) (map[string]string, error) {
|
||||
res := map[string]string{}
|
||||
|
||||
u, err := url.Parse(dsn)
|
||||
if err != nil {
|
||||
return res, err
|
||||
}
|
||||
|
||||
if u.Scheme != "sqlserver" {
|
||||
return res, fmt.Errorf("scheme %s is not recognized", u.Scheme)
|
||||
}
|
||||
|
||||
if u.User != nil {
|
||||
res["user id"] = u.User.Username()
|
||||
p, exists := u.User.Password()
|
||||
if exists {
|
||||
res["password"] = p
|
||||
}
|
||||
}
|
||||
|
||||
host, port, err := net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
host = u.Host
|
||||
}
|
||||
|
||||
if len(u.Path) > 0 {
|
||||
res["server"] = host + "\\" + u.Path[1:]
|
||||
} else {
|
||||
res["server"] = host
|
||||
}
|
||||
|
||||
if len(port) > 0 {
|
||||
res["port"] = port
|
||||
}
|
||||
|
||||
query := u.Query()
|
||||
for k, v := range query {
|
||||
if len(v) > 1 {
|
||||
return res, fmt.Errorf("key %s provided more than once", k)
|
||||
}
|
||||
res[k] = v[0]
|
||||
}
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func parseConnectParams(dsn string) (connectParams, error) {
|
||||
params := splitConnectionString(dsn)
|
||||
var p connectParams
|
||||
|
||||
var params map[string]string
|
||||
if strings.HasPrefix(dsn, "odbc:") {
|
||||
parameters, err := splitConnectionStringOdbc(dsn[len("odbc:"):])
|
||||
if err != nil {
|
||||
return p, err
|
||||
}
|
||||
params = parameters
|
||||
} else if strings.HasPrefix(dsn, "sqlserver://") {
|
||||
parameters, err := splitConnectionStringURL(dsn)
|
||||
if err != nil {
|
||||
return p, err
|
||||
}
|
||||
params = parameters
|
||||
} else {
|
||||
params = splitConnectionString(dsn)
|
||||
}
|
||||
|
||||
strlog, ok := params["log"]
|
||||
if ok {
|
||||
var err error
|
||||
@@ -859,9 +1093,9 @@ func dialConnection(p connectParams) (conn net.Conn, err error) {
|
||||
ips = []net.IP{ip}
|
||||
}
|
||||
if len(ips) == 1 {
|
||||
d := createDialer(p)
|
||||
d := createDialer(&p)
|
||||
addr := net.JoinHostPort(ips[0].String(), strconv.Itoa(int(p.port)))
|
||||
conn, err = d.Dial("tcp", addr)
|
||||
conn, err = d.Dial(addr)
|
||||
|
||||
} else {
|
||||
//Try Dials in parallel to avoid waiting for timeouts.
|
||||
@@ -870,9 +1104,9 @@ func dialConnection(p connectParams) (conn net.Conn, err error) {
|
||||
portStr := strconv.Itoa(int(p.port))
|
||||
for _, ip := range ips {
|
||||
go func(ip net.IP) {
|
||||
d := createDialer(p)
|
||||
d := createDialer(&p)
|
||||
addr := net.JoinHostPort(ip.String(), portStr)
|
||||
conn, err := d.Dial("tcp", addr)
|
||||
conn, err := d.Dial(addr)
|
||||
if err == nil {
|
||||
connChan <- conn
|
||||
} else {
|
||||
@@ -911,7 +1145,7 @@ func dialConnection(p connectParams) (conn net.Conn, err error) {
|
||||
return conn, err
|
||||
}
|
||||
|
||||
func connect(p connectParams) (res *tdsSession, err error) {
|
||||
func connect(log optionalLogger, p connectParams) (res *tdsSession, err error) {
|
||||
res = nil
|
||||
// if instance is specified use instance resolution service
|
||||
if p.instance != "" {
|
||||
@@ -944,6 +1178,7 @@ initiate_connection:
|
||||
outbuf := newTdsBuffer(4096, toconn)
|
||||
sess := tdsSession{
|
||||
buf: outbuf,
|
||||
log: log,
|
||||
logFlags: p.logFlags,
|
||||
}
|
||||
|
||||
|
||||
212
vendor/github.com/denisenkom/go-mssqldb/token.go
generated
vendored
212
vendor/github.com/denisenkom/go-mssqldb/token.go
generated
vendored
@@ -2,12 +2,13 @@ package mssql
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io"
|
||||
"net"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
"net"
|
||||
"errors"
|
||||
)
|
||||
|
||||
// token ids
|
||||
@@ -90,7 +91,7 @@ func (d doneStruct) isError() bool {
|
||||
|
||||
func (d doneStruct) getError() Error {
|
||||
if len(d.errors) > 0 {
|
||||
return d.errors[len(d.errors) - 1]
|
||||
return d.errors[len(d.errors)-1]
|
||||
} else {
|
||||
return Error{Message: "Request failed but didn't provide reason"}
|
||||
}
|
||||
@@ -503,7 +504,7 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct) {
|
||||
defer func() {
|
||||
if err := recover(); err != nil {
|
||||
if sess.logFlags&logErrors != 0 {
|
||||
sess.log.Printf("ERROR: Intercepted panick %v", err)
|
||||
sess.log.Printf("ERROR: Intercepted panic %v", err)
|
||||
}
|
||||
ch <- err
|
||||
}
|
||||
@@ -600,94 +601,155 @@ func processSingleResponse(sess *tdsSession, ch chan tokenStruct) {
|
||||
}
|
||||
}
|
||||
|
||||
func processResponse(ctx context.Context, sess *tdsSession, ch chan tokenStruct) {
|
||||
defer func() {
|
||||
close(ch)
|
||||
}()
|
||||
doneChan := ctx.Done()
|
||||
cancelInProgress := false
|
||||
cancelledByContext := false
|
||||
var cancelError error
|
||||
type parseRespIter byte
|
||||
|
||||
// loop over multiple responses
|
||||
for {
|
||||
if sess.logFlags&logDebug != 0 {
|
||||
sess.log.Println("initiating resonse reading")
|
||||
const (
|
||||
parseRespIterContinue parseRespIter = iota // Continue parsing current token.
|
||||
parseRespIterNext // Fetch the next token.
|
||||
parseRespIterDone // Done with parsing the response.
|
||||
)
|
||||
|
||||
type parseRespState byte
|
||||
|
||||
const (
|
||||
parseRespStateNormal parseRespState = iota // Normal response state.
|
||||
parseRespStateCancel // Query is canceled, wait for server to confirm.
|
||||
parseRespStateClosing // Waiting for tokens to come through.
|
||||
)
|
||||
|
||||
type parseResp struct {
|
||||
sess *tdsSession
|
||||
ctxDone <-chan struct{}
|
||||
state parseRespState
|
||||
cancelError error
|
||||
}
|
||||
|
||||
func (ts *parseResp) sendAttention(ch chan tokenStruct) parseRespIter {
|
||||
err := sendAttention(ts.sess.buf)
|
||||
if err != nil {
|
||||
ts.dlogf("failed to send attention signal %v", err)
|
||||
ch <- err
|
||||
return parseRespIterDone
|
||||
}
|
||||
tokChan := make(chan tokenStruct)
|
||||
go processSingleResponse(sess, tokChan)
|
||||
// loop over multiple tokens in response
|
||||
tokensLoop:
|
||||
for {
|
||||
ts.state = parseRespStateCancel
|
||||
return parseRespIterContinue
|
||||
}
|
||||
|
||||
func (ts *parseResp) dlog(msg string) {
|
||||
if ts.sess.logFlags&logDebug != 0 {
|
||||
ts.sess.log.Println(msg)
|
||||
}
|
||||
}
|
||||
func (ts *parseResp) dlogf(f string, v ...interface{}) {
|
||||
if ts.sess.logFlags&logDebug != 0 {
|
||||
ts.sess.log.Printf(f, v...)
|
||||
}
|
||||
}
|
||||
|
||||
func (ts *parseResp) iter(ctx context.Context, ch chan tokenStruct, tokChan chan tokenStruct) parseRespIter {
|
||||
switch ts.state {
|
||||
default:
|
||||
panic("unknown state")
|
||||
case parseRespStateNormal:
|
||||
select {
|
||||
case tok, ok := <-tokChan:
|
||||
if ok {
|
||||
if cancelInProgress {
|
||||
if !ok {
|
||||
ts.dlog("response finished")
|
||||
return parseRespIterDone
|
||||
}
|
||||
if err, ok := tok.(net.Error); ok && err.Timeout() {
|
||||
ts.cancelError = err
|
||||
ts.dlog("got timeout error, sending attention signal to server")
|
||||
return ts.sendAttention(ch)
|
||||
}
|
||||
// Pass the token along.
|
||||
ch <- tok
|
||||
return parseRespIterContinue
|
||||
|
||||
case <-ts.ctxDone:
|
||||
ts.ctxDone = nil
|
||||
ts.dlog("got cancel message, sending attention signal to server")
|
||||
return ts.sendAttention(ch)
|
||||
}
|
||||
case parseRespStateCancel: // Read all responses until a DONE or error is received.Auth
|
||||
select {
|
||||
case tok, ok := <-tokChan:
|
||||
if !ok {
|
||||
ts.dlog("response finished but waiting for attention ack")
|
||||
return parseRespIterNext
|
||||
}
|
||||
switch tok := tok.(type) {
|
||||
default:
|
||||
// Ignore all other tokens while waiting.
|
||||
// The TDS spec says other tokens may arrive after an attention
|
||||
// signal is sent. Ignore these tokens and continue looking for
|
||||
// a DONE with attention confirm mark.
|
||||
case doneStruct:
|
||||
if tok.Status&doneAttn != 0 {
|
||||
if sess.logFlags&logDebug != 0 {
|
||||
sess.log.Println("got cancellation confirmation from server")
|
||||
}
|
||||
if cancelledByContext {
|
||||
ts.dlog("got cancellation confirmation from server")
|
||||
if ts.cancelError != nil {
|
||||
ch <- ts.cancelError
|
||||
ts.cancelError = nil
|
||||
} else {
|
||||
ch <- ctx.Err()
|
||||
} else {
|
||||
ch <- cancelError
|
||||
}
|
||||
return
|
||||
return parseRespIterDone
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if err, ok := tok.(net.Error); ok && err.Timeout() {
|
||||
cancelError = err
|
||||
if sess.logFlags&logDebug != 0 {
|
||||
sess.log.Println("got timeout error, sending attention signal to server")
|
||||
}
|
||||
err := sendAttention(sess.buf)
|
||||
if err != nil {
|
||||
if sess.logFlags&logErrors != 0 {
|
||||
sess.log.Println("Failed to send attention signal %v", err)
|
||||
}
|
||||
ch <- err
|
||||
return
|
||||
}
|
||||
doneChan = nil
|
||||
cancelInProgress = true
|
||||
cancelledByContext = false
|
||||
} else {
|
||||
|
||||
// If an error happens during cancel, pass it along and just stop.
|
||||
// We are uncertain to receive more tokens.
|
||||
case error:
|
||||
ch <- tok
|
||||
ts.state = parseRespStateClosing
|
||||
}
|
||||
return parseRespIterContinue
|
||||
case <-ts.ctxDone:
|
||||
ts.ctxDone = nil
|
||||
ts.state = parseRespStateClosing
|
||||
return parseRespIterContinue
|
||||
}
|
||||
} else {
|
||||
// response finished
|
||||
if cancelInProgress {
|
||||
if sess.logFlags&logDebug != 0 {
|
||||
sess.log.Println("response finished but waiting for attention ack")
|
||||
case parseRespStateClosing: // Wait for current token chan to close.
|
||||
if _, ok := <-tokChan; !ok {
|
||||
ts.dlog("response finished")
|
||||
return parseRespIterDone
|
||||
}
|
||||
return parseRespIterContinue
|
||||
}
|
||||
}
|
||||
|
||||
func processResponse(ctx context.Context, sess *tdsSession, ch chan tokenStruct) {
|
||||
ts := &parseResp{
|
||||
ctxDone: ctx.Done(),
|
||||
sess: sess,
|
||||
}
|
||||
defer func() {
|
||||
// Ensure any remaining error is piped through
|
||||
// or the query may look like it executed when it actually failed.
|
||||
if ts.cancelError != nil {
|
||||
ch <- ts.cancelError
|
||||
ts.cancelError = nil
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
// Loop over multiple responses.
|
||||
for {
|
||||
ts.dlog("initiating resonse reading")
|
||||
|
||||
tokChan := make(chan tokenStruct)
|
||||
go processSingleResponse(sess, tokChan)
|
||||
|
||||
// Loop over multiple tokens in response.
|
||||
tokensLoop:
|
||||
for {
|
||||
switch ts.iter(ctx, ch, tokChan) {
|
||||
case parseRespIterContinue:
|
||||
// Nothing, continue to next token.
|
||||
case parseRespIterNext:
|
||||
break tokensLoop
|
||||
} else {
|
||||
if sess.logFlags&logDebug != 0 {
|
||||
sess.log.Println("response finished")
|
||||
}
|
||||
case parseRespIterDone:
|
||||
return
|
||||
}
|
||||
}
|
||||
case <-doneChan:
|
||||
if sess.logFlags&logDebug != 0 {
|
||||
sess.log.Println("got cancel message, sending attention signal to server")
|
||||
}
|
||||
err := sendAttention(sess.buf)
|
||||
if err != nil {
|
||||
if sess.logFlags&logErrors != 0 {
|
||||
sess.log.Println("Failed to send attention signal %v", err)
|
||||
}
|
||||
ch <- err
|
||||
return
|
||||
}
|
||||
doneChan = nil
|
||||
cancelInProgress = true
|
||||
cancelledByContext = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
3
vendor/github.com/denisenkom/go-mssqldb/tran.go
generated
vendored
3
vendor/github.com/denisenkom/go-mssqldb/tran.go
generated
vendored
@@ -1,6 +1,7 @@
|
||||
package mssql
|
||||
|
||||
// Transaction Manager requests
|
||||
// http://msdn.microsoft.com/en-us/library/dd339887.aspx
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
|
||||
11
vendor/github.com/denisenkom/go-mssqldb/types.go
generated
vendored
11
vendor/github.com/denisenkom/go-mssqldb/types.go
generated
vendored
@@ -134,10 +134,11 @@ func writeVarLen(w io.Writer, ti *typeInfo) (err error) {
|
||||
return
|
||||
}
|
||||
ti.Writer = writeByteLenType
|
||||
case typeGuid, typeIntN, typeDecimal, typeNumeric,
|
||||
case typeIntN, typeDecimal, typeNumeric,
|
||||
typeBitN, typeDecimalN, typeNumericN, typeFltN,
|
||||
typeMoneyN, typeDateTimeN, typeChar,
|
||||
typeVarChar, typeBinary, typeVarBinary:
|
||||
|
||||
// byle len types
|
||||
if ti.Size > 0xff {
|
||||
panic("Invalid size for BYLELEN_TYPE")
|
||||
@@ -157,6 +158,14 @@ func writeVarLen(w io.Writer, ti *typeInfo) (err error) {
|
||||
}
|
||||
}
|
||||
ti.Writer = writeByteLenType
|
||||
case typeGuid:
|
||||
if !(ti.Size == 0x10 || ti.Size == 0x00) {
|
||||
panic("Invalid size for BYLELEN_TYPE")
|
||||
}
|
||||
if err = binary.Write(w, binary.LittleEndian, uint8(ti.Size)); err != nil {
|
||||
return
|
||||
}
|
||||
ti.Writer = writeByteLenType
|
||||
case typeBigVarBin, typeBigVarChar, typeBigBinary, typeBigChar,
|
||||
typeNVarChar, typeNChar, typeXml, typeUdt:
|
||||
// short len types
|
||||
|
||||
74
vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go
generated
vendored
Normal file
74
vendor/github.com/denisenkom/go-mssqldb/uniqueidentifier.go
generated
vendored
Normal file
@@ -0,0 +1,74 @@
|
||||
package mssql
|
||||
|
||||
import (
|
||||
"database/sql/driver"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type UniqueIdentifier [16]byte
|
||||
|
||||
func (u *UniqueIdentifier) Scan(v interface{}) error {
|
||||
reverse := func(b []byte) {
|
||||
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
|
||||
b[i], b[j] = b[j], b[i]
|
||||
}
|
||||
}
|
||||
|
||||
switch vt := v.(type) {
|
||||
case []byte:
|
||||
if len(vt) != 16 {
|
||||
return errors.New("mssql: invalid UniqueIdentifier length")
|
||||
}
|
||||
|
||||
var raw UniqueIdentifier
|
||||
|
||||
copy(raw[:], vt)
|
||||
|
||||
reverse(raw[0:4])
|
||||
reverse(raw[4:6])
|
||||
reverse(raw[6:8])
|
||||
*u = raw
|
||||
|
||||
return nil
|
||||
case string:
|
||||
if len(vt) != 36 {
|
||||
return errors.New("mssql: invalid UniqueIdentifier string length")
|
||||
}
|
||||
|
||||
b := []byte(vt)
|
||||
for i, c := range b {
|
||||
switch c {
|
||||
case '-':
|
||||
b = append(b[:i], b[i+1:]...)
|
||||
}
|
||||
}
|
||||
|
||||
_, err := hex.Decode(u[:], []byte(b))
|
||||
return err
|
||||
default:
|
||||
return fmt.Errorf("mssql: cannot convert %T to UniqueIdentifier", v)
|
||||
}
|
||||
}
|
||||
|
||||
func (u UniqueIdentifier) Value() (driver.Value, error) {
|
||||
reverse := func(b []byte) {
|
||||
for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
|
||||
b[i], b[j] = b[j], b[i]
|
||||
}
|
||||
}
|
||||
|
||||
raw := make([]byte, len(u))
|
||||
copy(raw, u[:])
|
||||
|
||||
reverse(raw[0:4])
|
||||
reverse(raw[4:6])
|
||||
reverse(raw[6:8])
|
||||
|
||||
return raw, nil
|
||||
}
|
||||
|
||||
func (u UniqueIdentifier) String() string {
|
||||
return fmt.Sprintf("%X-%X-%X-%X-%X", u[0:4], u[4:6], u[6:8], u[8:10], u[10:])
|
||||
}
|
||||
50
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
generated
vendored
50
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go
generated
vendored
@@ -15,12 +15,17 @@ import (
|
||||
"google.golang.org/grpc/metadata"
|
||||
)
|
||||
|
||||
// MetadataHeaderPrefix is prepended to HTTP headers in order to convert them to
|
||||
// gRPC metadata for incoming requests processed by grpc-gateway
|
||||
// MetadataHeaderPrefix is the http prefix that represents custom metadata
|
||||
// parameters to or from a gRPC call.
|
||||
const MetadataHeaderPrefix = "Grpc-Metadata-"
|
||||
|
||||
// MetadataPrefix is the prefix for grpc-gateway supplied custom metadata fields.
|
||||
const MetadataPrefix = "grpcgateway-"
|
||||
|
||||
// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
|
||||
// HTTP headers in a response handled by grpc-gateway
|
||||
const MetadataTrailerPrefix = "Grpc-Trailer-"
|
||||
|
||||
const metadataGrpcTimeout = "Grpc-Timeout"
|
||||
|
||||
const xForwardedFor = "X-Forwarded-For"
|
||||
@@ -52,8 +57,12 @@ func AnnotateContext(ctx context.Context, req *http.Request) (context.Context, e
|
||||
|
||||
for key, vals := range req.Header {
|
||||
for _, val := range vals {
|
||||
if key == "Authorization" {
|
||||
// For backwards-compatibility, pass through 'authorization' header with no prefix.
|
||||
if strings.ToLower(key) == "authorization" {
|
||||
pairs = append(pairs, "authorization", val)
|
||||
}
|
||||
if isPermanentHTTPHeader(key) {
|
||||
pairs = append(pairs, strings.ToLower(fmt.Sprintf("%s%s", MetadataPrefix, key)), val)
|
||||
continue
|
||||
}
|
||||
if strings.HasPrefix(key, MetadataHeaderPrefix) {
|
||||
@@ -141,3 +150,38 @@ func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// isPermanentHTTPHeader checks whether hdr belongs to the list of
|
||||
// permenant request headers maintained by IANA.
|
||||
// http://www.iana.org/assignments/message-headers/message-headers.xml
|
||||
func isPermanentHTTPHeader(hdr string) bool {
|
||||
switch hdr {
|
||||
case
|
||||
"Accept",
|
||||
"Accept-Charset",
|
||||
"Accept-Language",
|
||||
"Accept-Ranges",
|
||||
"Authorization",
|
||||
"Cache-Control",
|
||||
"Content-Type",
|
||||
"Cookie",
|
||||
"Date",
|
||||
"Expect",
|
||||
"From",
|
||||
"Host",
|
||||
"If-Match",
|
||||
"If-Modified-Since",
|
||||
"If-None-Match",
|
||||
"If-Schedule-Tag-Match",
|
||||
"If-Unmodified-Since",
|
||||
"Max-Forwards",
|
||||
"Origin",
|
||||
"Pragma",
|
||||
"Referer",
|
||||
"User-Agent",
|
||||
"Via",
|
||||
"Warning":
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
32
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
generated
vendored
32
vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go
generated
vendored
@@ -5,6 +5,7 @@ import (
|
||||
"net/url"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/grpc-ecosystem/grpc-gateway/utilities"
|
||||
@@ -62,7 +63,7 @@ func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []
|
||||
case reflect.Ptr:
|
||||
if f.IsNil() {
|
||||
m = reflect.New(f.Type().Elem())
|
||||
f.Set(m)
|
||||
f.Set(m.Convert(f.Type()))
|
||||
}
|
||||
m = f.Elem()
|
||||
continue
|
||||
@@ -101,18 +102,41 @@ func populateRepeatedField(f reflect.Value, values []string) error {
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported field type %s", elemType)
|
||||
}
|
||||
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)))
|
||||
f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type()))
|
||||
for i, v := range values {
|
||||
result := conv.Call([]reflect.Value{reflect.ValueOf(v)})
|
||||
if err := result[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
f.Index(i).Set(result[0])
|
||||
f.Index(i).Set(result[0].Convert(f.Index(i).Type()))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func populateField(f reflect.Value, value string) error {
|
||||
// Handle well known type
|
||||
type wkt interface {
|
||||
XXX_WellKnownType() string
|
||||
}
|
||||
if wkt, ok := f.Addr().Interface().(wkt); ok {
|
||||
switch wkt.XXX_WellKnownType() {
|
||||
case "Timestamp":
|
||||
if value == "null" {
|
||||
f.Field(0).SetInt(0)
|
||||
f.Field(1).SetInt(0)
|
||||
return nil
|
||||
}
|
||||
|
||||
t, err := time.Parse(time.RFC3339Nano, value)
|
||||
if err != nil {
|
||||
return fmt.Errorf("bad Timestamp: %v", err)
|
||||
}
|
||||
f.Field(0).SetInt(int64(t.Unix()))
|
||||
f.Field(1).SetInt(int64(t.Nanosecond()))
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
conv, ok := convFromType[f.Kind()]
|
||||
if !ok {
|
||||
return fmt.Errorf("unsupported field type %T", f)
|
||||
@@ -121,7 +145,7 @@ func populateField(f reflect.Value, value string) error {
|
||||
if err := result[1].Interface(); err != nil {
|
||||
return err.(error)
|
||||
}
|
||||
f.Set(result[0])
|
||||
f.Set(result[0].Convert(f.Type()))
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
10
vendor/github.com/hashicorp/consul/api/api.go
generated
vendored
10
vendor/github.com/hashicorp/consul/api/api.go
generated
vendored
@@ -74,6 +74,11 @@ type QueryOptions struct {
|
||||
// that node. Setting this to "_agent" will use the agent's node
|
||||
// for the sort.
|
||||
Near string
|
||||
|
||||
// NodeMeta is used to filter results by nodes with the given
|
||||
// metadata key/value pairs. Currently, only one key/value pair can
|
||||
// be provided for filtering.
|
||||
NodeMeta map[string]string
|
||||
}
|
||||
|
||||
// WriteOptions are used to parameterize a write
|
||||
@@ -386,6 +391,11 @@ func (r *request) setQueryOptions(q *QueryOptions) {
|
||||
if q.Near != "" {
|
||||
r.params.Set("near", q.Near)
|
||||
}
|
||||
if len(q.NodeMeta) > 0 {
|
||||
for key, value := range q.NodeMeta {
|
||||
r.params.Add("node-meta", key+":"+value)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// durToMsec converts a duration to a millisecond specified string. If the
|
||||
|
||||
3
vendor/github.com/hashicorp/consul/api/catalog.go
generated
vendored
3
vendor/github.com/hashicorp/consul/api/catalog.go
generated
vendored
@@ -4,12 +4,14 @@ type Node struct {
|
||||
Node string
|
||||
Address string
|
||||
TaggedAddresses map[string]string
|
||||
Meta map[string]string
|
||||
}
|
||||
|
||||
type CatalogService struct {
|
||||
Node string
|
||||
Address string
|
||||
TaggedAddresses map[string]string
|
||||
NodeMeta map[string]string
|
||||
ServiceID string
|
||||
ServiceName string
|
||||
ServiceAddress string
|
||||
@@ -29,6 +31,7 @@ type CatalogRegistration struct {
|
||||
Node string
|
||||
Address string
|
||||
TaggedAddresses map[string]string
|
||||
NodeMeta map[string]string
|
||||
Datacenter string
|
||||
Service *AgentService
|
||||
Check *AgentCheck
|
||||
|
||||
17
vendor/github.com/mitchellh/copystructure/copystructure.go
generated
vendored
17
vendor/github.com/mitchellh/copystructure/copystructure.go
generated
vendored
@@ -405,6 +405,23 @@ func (w *walker) replacePointerMaybe() {
|
||||
}
|
||||
|
||||
v := w.valPop()
|
||||
|
||||
// If the expected type is a pointer to an interface of any depth,
|
||||
// such as *interface{}, **interface{}, etc., then we need to convert
|
||||
// the value "v" from *CONCRETE to *interface{} so types match for
|
||||
// Set.
|
||||
//
|
||||
// Example if v is type *Foo where Foo is a struct, v would become
|
||||
// *interface{} instead. This only happens if we have an interface expectation
|
||||
// at this depth.
|
||||
//
|
||||
// For more info, see GH-16
|
||||
if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
|
||||
y := reflect.New(iType) // Create *interface{}
|
||||
y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
|
||||
v = y // v is now typed *interface{} (where *v = Foo)
|
||||
}
|
||||
|
||||
for i := 1; i < w.ps[w.depth]; i++ {
|
||||
if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
|
||||
iface := reflect.New(iType).Elem()
|
||||
|
||||
2
vendor/github.com/mitchellh/reflectwalk/location.go
generated
vendored
2
vendor/github.com/mitchellh/reflectwalk/location.go
generated
vendored
@@ -11,6 +11,8 @@ const (
|
||||
MapValue
|
||||
Slice
|
||||
SliceElem
|
||||
Array
|
||||
ArrayElem
|
||||
Struct
|
||||
StructField
|
||||
WalkLoc
|
||||
|
||||
53
vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
generated
vendored
53
vendor/github.com/mitchellh/reflectwalk/reflectwalk.go
generated
vendored
@@ -39,6 +39,13 @@ type SliceWalker interface {
|
||||
SliceElem(int, reflect.Value) error
|
||||
}
|
||||
|
||||
// ArrayWalker implementations are able to handle array elements found
|
||||
// within complex structures.
|
||||
type ArrayWalker interface {
|
||||
Array(reflect.Value) error
|
||||
ArrayElem(int, reflect.Value) error
|
||||
}
|
||||
|
||||
// StructWalker is an interface that has methods that are called for
|
||||
// structs when a Walk is done.
|
||||
type StructWalker interface {
|
||||
@@ -179,6 +186,9 @@ func walk(v reflect.Value, w interface{}) (err error) {
|
||||
case reflect.Struct:
|
||||
err = walkStruct(v, w)
|
||||
return
|
||||
case reflect.Array:
|
||||
err = walkArray(v, w)
|
||||
return
|
||||
default:
|
||||
panic("unsupported type: " + k.String())
|
||||
}
|
||||
@@ -286,6 +296,49 @@ func walkSlice(v reflect.Value, w interface{}) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func walkArray(v reflect.Value, w interface{}) (err error) {
|
||||
ew, ok := w.(EnterExitWalker)
|
||||
if ok {
|
||||
ew.Enter(Array)
|
||||
}
|
||||
|
||||
if aw, ok := w.(ArrayWalker); ok {
|
||||
if err := aw.Array(v); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
elem := v.Index(i)
|
||||
|
||||
if aw, ok := w.(ArrayWalker); ok {
|
||||
if err := aw.ArrayElem(i, elem); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
ew, ok := w.(EnterExitWalker)
|
||||
if ok {
|
||||
ew.Enter(ArrayElem)
|
||||
}
|
||||
|
||||
if err := walk(elem, w); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ok {
|
||||
ew.Exit(ArrayElem)
|
||||
}
|
||||
}
|
||||
|
||||
ew, ok = w.(EnterExitWalker)
|
||||
if ok {
|
||||
ew.Exit(Array)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func walkStruct(v reflect.Value, w interface{}) (err error) {
|
||||
ew, ewok := w.(EnterExitWalker)
|
||||
if ewok {
|
||||
|
||||
34
vendor/github.com/pborman/uuid/json.go
generated
vendored
34
vendor/github.com/pborman/uuid/json.go
generated
vendored
@@ -1,34 +0,0 @@
|
||||
// Copyright 2014 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "errors"
|
||||
|
||||
func (u UUID) MarshalJSON() ([]byte, error) {
|
||||
if len(u) != 16 {
|
||||
return []byte(`""`), nil
|
||||
}
|
||||
var js [38]byte
|
||||
js[0] = '"'
|
||||
encodeHex(js[1:], u)
|
||||
js[37] = '"'
|
||||
return js[:], nil
|
||||
}
|
||||
|
||||
func (u *UUID) UnmarshalJSON(data []byte) error {
|
||||
if string(data) == `""` {
|
||||
return nil
|
||||
}
|
||||
if data[0] != '"' {
|
||||
return errors.New("invalid UUID format")
|
||||
}
|
||||
data = data[1 : len(data)-1]
|
||||
uu := Parse(string(data))
|
||||
if uu == nil {
|
||||
return errors.New("invalid UUID format")
|
||||
}
|
||||
*u = uu
|
||||
return nil
|
||||
}
|
||||
83
vendor/github.com/pborman/uuid/marshal.go
generated
vendored
Normal file
83
vendor/github.com/pborman/uuid/marshal.go
generated
vendored
Normal file
@@ -0,0 +1,83 @@
|
||||
// Copyright 2016 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (u UUID) MarshalText() ([]byte, error) {
|
||||
if len(u) != 16 {
|
||||
return nil, nil
|
||||
}
|
||||
var js [36]byte
|
||||
encodeHex(js[:], u)
|
||||
return js[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (u *UUID) UnmarshalText(data []byte) error {
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
id := Parse(string(data))
|
||||
if id == nil {
|
||||
return errors.New("invalid UUID")
|
||||
}
|
||||
*u = id
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (u UUID) MarshalBinary() ([]byte, error) {
|
||||
return u[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (u *UUID) UnmarshalBinary(data []byte) error {
|
||||
if len(data) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
var id [16]byte
|
||||
copy(id[:], data)
|
||||
*u = id[:]
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalText implements encoding.TextMarshaler.
|
||||
func (u Array) MarshalText() ([]byte, error) {
|
||||
var js [36]byte
|
||||
encodeHex(js[:], u[:])
|
||||
return js[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalText implements encoding.TextUnmarshaler.
|
||||
func (u *Array) UnmarshalText(data []byte) error {
|
||||
id := Parse(string(data))
|
||||
if id == nil {
|
||||
return errors.New("invalid UUID")
|
||||
}
|
||||
*u = id.Array()
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary implements encoding.BinaryMarshaler.
|
||||
func (u Array) MarshalBinary() ([]byte, error) {
|
||||
return u[:], nil
|
||||
}
|
||||
|
||||
// UnmarshalBinary implements encoding.BinaryUnmarshaler.
|
||||
func (u *Array) UnmarshalBinary(data []byte) error {
|
||||
if len(data) != 16 {
|
||||
return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
|
||||
}
|
||||
copy(u[:], data)
|
||||
return nil
|
||||
}
|
||||
47
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
47
vendor/github.com/prometheus/common/expfmt/decode.go
generated
vendored
@@ -31,6 +31,7 @@ type Decoder interface {
|
||||
Decode(*dto.MetricFamily) error
|
||||
}
|
||||
|
||||
// DecodeOptions contains options used by the Decoder and in sample extraction.
|
||||
type DecodeOptions struct {
|
||||
// Timestamp is added to each value from the stream that has no explicit timestamp set.
|
||||
Timestamp model.Time
|
||||
@@ -142,6 +143,8 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// SampleDecoder wraps a Decoder to extract samples from the metric families
|
||||
// decoded by the wrapped Decoder.
|
||||
type SampleDecoder struct {
|
||||
Dec Decoder
|
||||
Opts *DecodeOptions
|
||||
@@ -149,37 +152,51 @@ type SampleDecoder struct {
|
||||
f dto.MetricFamily
|
||||
}
|
||||
|
||||
// Decode calls the Decode method of the wrapped Decoder and then extracts the
|
||||
// samples from the decoded MetricFamily into the provided model.Vector.
|
||||
func (sd *SampleDecoder) Decode(s *model.Vector) error {
|
||||
if err := sd.Dec.Decode(&sd.f); err != nil {
|
||||
err := sd.Dec.Decode(&sd.f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
*s = extractSamples(&sd.f, sd.Opts)
|
||||
return nil
|
||||
*s, err = extractSamples(&sd.f, sd.Opts)
|
||||
return err
|
||||
}
|
||||
|
||||
// Extract samples builds a slice of samples from the provided metric families.
|
||||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) model.Vector {
|
||||
var all model.Vector
|
||||
// ExtractSamples builds a slice of samples from the provided metric
|
||||
// families. If an error occurs during sample extraction, it continues to
|
||||
// extract from the remaining metric families. The returned error is the last
|
||||
// error that has occured.
|
||||
func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) {
|
||||
var (
|
||||
all model.Vector
|
||||
lastErr error
|
||||
)
|
||||
for _, f := range fams {
|
||||
all = append(all, extractSamples(f, o)...)
|
||||
some, err := extractSamples(f, o)
|
||||
if err != nil {
|
||||
lastErr = err
|
||||
continue
|
||||
}
|
||||
return all
|
||||
all = append(all, some...)
|
||||
}
|
||||
return all, lastErr
|
||||
}
|
||||
|
||||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) model.Vector {
|
||||
func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) {
|
||||
switch f.GetType() {
|
||||
case dto.MetricType_COUNTER:
|
||||
return extractCounter(o, f)
|
||||
return extractCounter(o, f), nil
|
||||
case dto.MetricType_GAUGE:
|
||||
return extractGauge(o, f)
|
||||
return extractGauge(o, f), nil
|
||||
case dto.MetricType_SUMMARY:
|
||||
return extractSummary(o, f)
|
||||
return extractSummary(o, f), nil
|
||||
case dto.MetricType_UNTYPED:
|
||||
return extractUntyped(o, f)
|
||||
return extractUntyped(o, f), nil
|
||||
case dto.MetricType_HISTOGRAM:
|
||||
return extractHistogram(o, f)
|
||||
return extractHistogram(o, f), nil
|
||||
}
|
||||
panic("expfmt.extractSamples: unknown metric family type")
|
||||
return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType())
|
||||
}
|
||||
|
||||
func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector {
|
||||
|
||||
5
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
5
vendor/github.com/prometheus/common/expfmt/expfmt.go
generated
vendored
@@ -11,14 +11,15 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// A package for reading and writing Prometheus metrics.
|
||||
// Package expfmt contains tools for reading and writing Prometheus metrics.
|
||||
package expfmt
|
||||
|
||||
// Format specifies the HTTP content type of the different wire protocols.
|
||||
type Format string
|
||||
|
||||
// Constants to assemble the Content-Type values for the different wire protocols.
|
||||
const (
|
||||
TextVersion = "0.0.4"
|
||||
|
||||
ProtoType = `application/vnd.google.protobuf`
|
||||
ProtoProtocol = `io.prometheus.client.MetricFamily`
|
||||
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
|
||||
|
||||
2
vendor/github.com/ugorji/go/codec/gen-helper.generated.go
generated
vendored
2
vendor/github.com/ugorji/go/codec/gen-helper.generated.go
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// // +build ignore
|
||||
/* // +build ignore */
|
||||
|
||||
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
2
vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
generated
vendored
2
vendor/github.com/ugorji/go/codec/gen-helper.go.tmpl
generated
vendored
@@ -1,4 +1,4 @@
|
||||
// // +build ignore
|
||||
/* // +build ignore */
|
||||
|
||||
// Copyright (c) 2012-2015 Ugorji Nwoke. All rights reserved.
|
||||
// Use of this source code is governed by a MIT license found in the LICENSE file.
|
||||
|
||||
12
vendor/github.com/ugorji/go/codec/gen.go
generated
vendored
12
vendor/github.com/ugorji/go/codec/gen.go
generated
vendored
@@ -165,15 +165,9 @@ type genRunner struct {
|
||||
//
|
||||
// Library users: *DO NOT USE IT DIRECTLY. IT WILL CHANGE CONTINOUSLY WITHOUT NOTICE.*
|
||||
func Gen(w io.Writer, buildTags, pkgName, uid string, useUnsafe bool, ti *TypeInfos, typ ...reflect.Type) {
|
||||
// trim out all types which already implement Selfer
|
||||
typ2 := make([]reflect.Type, 0, len(typ))
|
||||
for _, t := range typ {
|
||||
if reflect.PtrTo(t).Implements(selferTyp) || t.Implements(selferTyp) {
|
||||
continue
|
||||
}
|
||||
typ2 = append(typ2, t)
|
||||
}
|
||||
typ = typ2
|
||||
// All types passed to this method do not have a codec.Selfer method implemented directly.
|
||||
// codecgen already checks the AST and skips any types that define the codec.Selfer methods.
|
||||
// Consequently, there's no need to check and trim them if they implement codec.Selfer
|
||||
|
||||
if len(typ) == 0 {
|
||||
return
|
||||
|
||||
2
vendor/github.com/ugorji/go/codec/simple.go
generated
vendored
2
vendor/github.com/ugorji/go/codec/simple.go
generated
vendored
@@ -347,7 +347,7 @@ func (d *simpleDecDriver) decLen() int {
|
||||
}
|
||||
return int(ui)
|
||||
}
|
||||
d.d.errorf("decLen: Cannot read length: bd%8 must be in range 0..4. Got: %d", d.bd%8)
|
||||
d.d.errorf("decLen: Cannot read length: bd%%8 must be in range 0..4. Got: %d", d.bd%8)
|
||||
return -1
|
||||
}
|
||||
|
||||
|
||||
24
vendor/github.com/ugorji/go/codec/tests.sh
generated
vendored
24
vendor/github.com/ugorji/go/codec/tests.sh
generated
vendored
@@ -59,7 +59,7 @@ _run() {
|
||||
}
|
||||
|
||||
# echo ">>>>>>> RUNNING VARIATIONS OF TESTS"
|
||||
if [[ "x$@" = "x" ]]; then
|
||||
if [[ "x$@" = "x" || "x$@" = "x-A" ]]; then
|
||||
# All: r, x, g, gu
|
||||
_run "-_tcinsed_ml" # regular
|
||||
_run "-_tcinsed_ml_z" # regular with reset
|
||||
@@ -75,6 +75,28 @@ elif [[ "x$@" = "x-F" ]]; then
|
||||
# regular with notfastpath
|
||||
_run "-_tcinsed_ml_f" # regular
|
||||
_run "-_tcinsed_ml_zf" # regular with reset
|
||||
elif [[ "x$@" = "x-C" ]]; then
|
||||
# codecgen
|
||||
_run "-gx_tcinsed_ml" # codecgen: requires external
|
||||
_run "-gxu_tcinsed_ml" # codecgen + unsafe
|
||||
elif [[ "x$@" = "x-X" ]]; then
|
||||
# external
|
||||
_run "-x_tcinsed_ml" # external
|
||||
elif [[ "x$@" = "x-h" || "x$@" = "x-?" ]]; then
|
||||
cat <<EOF
|
||||
Usage: tests.sh [options...]
|
||||
-A run through all tests (regular, external, codecgen)
|
||||
-Z regular tests only
|
||||
-F regular tests only (without fastpath, so they run quickly)
|
||||
-C codecgen only
|
||||
-X external only
|
||||
-h show help (usage)
|
||||
-? same as -h
|
||||
(no options)
|
||||
same as -A
|
||||
(unrecognized options)
|
||||
just pass on the options from the command line
|
||||
EOF
|
||||
else
|
||||
_run "$@"
|
||||
fi
|
||||
|
||||
2
vendor/golang.org/x/crypto/ssh/terminal/terminal.go
generated
vendored
2
vendor/golang.org/x/crypto/ssh/terminal/terminal.go
generated
vendored
@@ -772,8 +772,6 @@ func (t *Terminal) readLine() (line string, err error) {
|
||||
|
||||
t.remainder = t.inBuf[:n+len(t.remainder)]
|
||||
}
|
||||
|
||||
panic("unreachable") // for Go 1.0.
|
||||
}
|
||||
|
||||
// SetPrompt sets the prompt to be used when reading subsequent lines.
|
||||
|
||||
4
vendor/golang.org/x/crypto/ssh/terminal/util.go
generated
vendored
4
vendor/golang.org/x/crypto/ssh/terminal/util.go
generated
vendored
@@ -72,8 +72,10 @@ func GetState(fd int) (*State, error) {
|
||||
// Restore restores the terminal connected to the given file descriptor to a
|
||||
// previous state.
|
||||
func Restore(fd int, state *State) error {
|
||||
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0)
|
||||
if _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlWriteTermios, uintptr(unsafe.Pointer(&state.termios)), 0, 0, 0); err != 0 {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// GetSize returns the dimensions of the given terminal.
|
||||
|
||||
14
vendor/golang.org/x/crypto/ssh/transport.go
generated
vendored
14
vendor/golang.org/x/crypto/ssh/transport.go
generated
vendored
@@ -85,8 +85,18 @@ func (t *transport) prepareKeyChange(algs *algorithms, kexResult *kexResult) err
|
||||
}
|
||||
|
||||
// Read and decrypt next packet.
|
||||
func (t *transport) readPacket() ([]byte, error) {
|
||||
return t.reader.readPacket(t.bufReader)
|
||||
func (t *transport) readPacket() (p []byte, err error) {
|
||||
for {
|
||||
p, err = t.reader.readPacket(t.bufReader)
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
if len(p) == 0 || (p[0] != msgIgnore && p[0] != msgDebug) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return p, err
|
||||
}
|
||||
|
||||
func (s *connectionState) readPacket(r *bufio.Reader) ([]byte, error) {
|
||||
|
||||
508
vendor/golang.org/x/net/idna/idna.go
generated
vendored
508
vendor/golang.org/x/net/idna/idna.go
generated
vendored
@@ -1,61 +1,501 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Copied from the golang.org/x/text repo; DO NOT EDIT
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package idna implements IDNA2008 (Internationalized Domain Names for
|
||||
// Applications), defined in RFC 5890, RFC 5891, RFC 5892, RFC 5893 and
|
||||
// RFC 5894.
|
||||
// Package idna implements IDNA2008 using the compatibility processing
|
||||
// defined by UTS (Unicode Technical Standard) #46, which defines a standard to
|
||||
// deal with the transition from IDNA2003.
|
||||
//
|
||||
// IDNA2008 (Internationalized Domain Names for Applications), is defined in RFC
|
||||
// 5890, RFC 5891, RFC 5892, RFC 5893 and RFC 5894.
|
||||
// UTS #46 is defined in http://www.unicode.org/reports/tr46.
|
||||
// See http://unicode.org/cldr/utility/idna.jsp for a visualization of the
|
||||
// differences between these two standards.
|
||||
package idna // import "golang.org/x/net/idna"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/secure/bidirule"
|
||||
"golang.org/x/text/unicode/norm"
|
||||
)
|
||||
|
||||
// TODO(nigeltao): specify when errors occur. For example, is ToASCII(".") or
|
||||
// ToASCII("foo\x00") an error? See also http://www.unicode.org/faq/idn.html#11
|
||||
|
||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||
const acePrefix = "xn--"
|
||||
// NOTE: Unlike common practice in Go APIs, the functions will return a
|
||||
// sanitized domain name in case of errors. Browsers sometimes use a partially
|
||||
// evaluated string as lookup.
|
||||
// TODO: the current error handling is, in my opinion, the least opinionated.
|
||||
// Other strategies are also viable, though:
|
||||
// Option 1) Return an empty string in case of error, but allow the user to
|
||||
// specify explicitly which errors to ignore.
|
||||
// Option 2) Return the partially evaluated string if it is itself a valid
|
||||
// string, otherwise return the empty string in case of error.
|
||||
// Option 3) Option 1 and 2.
|
||||
// Option 4) Always return an empty string for now and implement Option 1 as
|
||||
// needed, and document that the return string may not be empty in case of
|
||||
// error in the future.
|
||||
// I think Option 1 is best, but it is quite opinionated.
|
||||
|
||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// ToASCII("golang") is "golang".
|
||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func ToASCII(s string) (string, error) {
|
||||
if ascii(s) {
|
||||
return s, nil
|
||||
}
|
||||
labels := strings.Split(s, ".")
|
||||
for i, label := range labels {
|
||||
if !ascii(label) {
|
||||
a, err := encode(acePrefix, label)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
labels[i] = a
|
||||
}
|
||||
}
|
||||
return strings.Join(labels, "."), nil
|
||||
return Resolve.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||
// ToUnicode("golang") is "golang".
|
||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func ToUnicode(s string) (string, error) {
|
||||
if !strings.Contains(s, acePrefix) {
|
||||
return s, nil
|
||||
return NonTransitional.process(s, false)
|
||||
}
|
||||
|
||||
// An Option configures a Profile at creation time.
|
||||
type Option func(*options)
|
||||
|
||||
// Transitional sets a Profile to use the Transitional mapping as defined
|
||||
// in UTS #46.
|
||||
func Transitional(transitional bool) Option {
|
||||
return func(o *options) { o.transitional = true }
|
||||
}
|
||||
|
||||
// VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
|
||||
// are longer than allowed by the RFC.
|
||||
func VerifyDNSLength(verify bool) Option {
|
||||
return func(o *options) { o.verifyDNSLength = verify }
|
||||
}
|
||||
|
||||
// IgnoreSTD3Rules sets whether ASCII characters outside the A-Z, a-z, 0-9 and
|
||||
// the hyphen should be allowed. By default this is not allowed, but IDNA2003,
|
||||
// and as a consequence UTS #46, allows this to be overridden to support
|
||||
// browsers that allow characters outside this range, for example a '_' (U+005F
|
||||
// LOW LINE). See http://www.rfc- editor.org/std/std3.txt for more details.
|
||||
func IgnoreSTD3Rules(ignore bool) Option {
|
||||
return func(o *options) { o.ignoreSTD3Rules = ignore }
|
||||
}
|
||||
|
||||
type options struct {
|
||||
transitional bool
|
||||
ignoreSTD3Rules bool
|
||||
verifyDNSLength bool
|
||||
}
|
||||
|
||||
// A Profile defines the configuration of a IDNA mapper.
|
||||
type Profile struct {
|
||||
options
|
||||
}
|
||||
|
||||
func apply(o *options, opts []Option) {
|
||||
for _, f := range opts {
|
||||
f(o)
|
||||
}
|
||||
}
|
||||
|
||||
// New creates a new Profile.
|
||||
// With no options, the returned profile is the non-transitional profile as
|
||||
// defined in UTS #46.
|
||||
func New(o ...Option) *Profile {
|
||||
p := &Profile{}
|
||||
apply(&p.options, o)
|
||||
return p
|
||||
}
|
||||
|
||||
// ToASCII converts a domain or domain label to its ASCII form. For example,
|
||||
// ToASCII("bücher.example.com") is "xn--bcher-kva.example.com", and
|
||||
// ToASCII("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToASCII(s string) (string, error) {
|
||||
return p.process(s, true)
|
||||
}
|
||||
|
||||
// ToUnicode converts a domain or domain label to its Unicode form. For example,
|
||||
// ToUnicode("xn--bcher-kva.example.com") is "bücher.example.com", and
|
||||
// ToUnicode("golang") is "golang". If an error is encountered it will return
|
||||
// an error and a (partially) processed result.
|
||||
func (p *Profile) ToUnicode(s string) (string, error) {
|
||||
pp := *p
|
||||
pp.transitional = false
|
||||
return pp.process(s, false)
|
||||
}
|
||||
|
||||
// String reports a string with a description of the profile for debugging
|
||||
// purposes. The string format may change with different versions.
|
||||
func (p *Profile) String() string {
|
||||
s := ""
|
||||
if p.transitional {
|
||||
s = "Transitional"
|
||||
} else {
|
||||
s = "NonTransitional"
|
||||
}
|
||||
if p.ignoreSTD3Rules {
|
||||
s += ":NoSTD3Rules"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
var (
|
||||
// Resolve is the recommended profile for resolving domain names.
|
||||
// The configuration of this profile may change over time.
|
||||
Resolve = resolve
|
||||
|
||||
// Display is the recommended profile for displaying domain names.
|
||||
// The configuration of this profile may change over time.
|
||||
Display = display
|
||||
|
||||
// NonTransitional defines a profile that implements the Transitional
|
||||
// mapping as defined in UTS #46 with no additional constraints.
|
||||
NonTransitional = nonTransitional
|
||||
|
||||
resolve = &Profile{options{transitional: true}}
|
||||
display = &Profile{}
|
||||
nonTransitional = &Profile{}
|
||||
|
||||
// TODO: profiles
|
||||
// V2008: strict IDNA2008
|
||||
// Register: recommended for approving domain names: nontransitional, but
|
||||
// bundle or block deviation characters.
|
||||
)
|
||||
|
||||
type labelError struct{ label, code_ string }
|
||||
|
||||
func (e labelError) code() string { return e.code_ }
|
||||
func (e labelError) Error() string {
|
||||
return fmt.Sprintf("idna: invalid label %q", e.label)
|
||||
}
|
||||
|
||||
type runeError rune
|
||||
|
||||
func (e runeError) code() string { return "P1" }
|
||||
func (e runeError) Error() string {
|
||||
return fmt.Sprintf("idna: disallowed rune %U", e)
|
||||
}
|
||||
|
||||
// process implements the algorithm described in section 4 of UTS #46,
|
||||
// see http://www.unicode.org/reports/tr46.
|
||||
func (p *Profile) process(s string, toASCII bool) (string, error) {
|
||||
var (
|
||||
b []byte
|
||||
err error
|
||||
k, i int
|
||||
)
|
||||
for i < len(s) {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
start := i
|
||||
i += sz
|
||||
// Copy bytes not copied so far.
|
||||
switch p.simplify(info(v).category()) {
|
||||
case valid:
|
||||
continue
|
||||
case disallowed:
|
||||
if err == nil {
|
||||
r, _ := utf8.DecodeRuneInString(s[i:])
|
||||
err = runeError(r)
|
||||
}
|
||||
continue
|
||||
case mapped, deviation:
|
||||
b = append(b, s[k:start]...)
|
||||
b = info(v).appendMapping(b, s[start:i])
|
||||
case ignored:
|
||||
b = append(b, s[k:start]...)
|
||||
// drop the rune
|
||||
case unknown:
|
||||
b = append(b, s[k:start]...)
|
||||
b = append(b, "\ufffd"...)
|
||||
}
|
||||
k = i
|
||||
}
|
||||
if k == 0 {
|
||||
// No changes so far.
|
||||
s = norm.NFC.String(s)
|
||||
} else {
|
||||
b = append(b, s[k:]...)
|
||||
if norm.NFC.QuickSpan(b) != len(b) {
|
||||
b = norm.NFC.Bytes(b)
|
||||
}
|
||||
// TODO: the punycode converters require strings as input.
|
||||
s = string(b)
|
||||
}
|
||||
// Remove leading empty labels
|
||||
for ; len(s) > 0 && s[0] == '.'; s = s[1:] {
|
||||
}
|
||||
if s == "" {
|
||||
return "", &labelError{s, "A4"}
|
||||
}
|
||||
labels := labelIter{orig: s}
|
||||
for ; !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if label == "" {
|
||||
// Empty labels are not okay. The label iterator skips the last
|
||||
// label if it is empty.
|
||||
if err == nil {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
continue
|
||||
}
|
||||
labels := strings.Split(s, ".")
|
||||
for i, label := range labels {
|
||||
if strings.HasPrefix(label, acePrefix) {
|
||||
u, err := decode(label[len(acePrefix):])
|
||||
if err != nil {
|
||||
return "", err
|
||||
u, err2 := decode(label[len(acePrefix):])
|
||||
if err2 != nil {
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
labels[i] = u
|
||||
// Spec says keep the old label.
|
||||
continue
|
||||
}
|
||||
labels.set(u)
|
||||
if err == nil {
|
||||
err = p.validateFromPunycode(u)
|
||||
}
|
||||
if err == nil {
|
||||
err = NonTransitional.validate(u)
|
||||
}
|
||||
} else if err == nil {
|
||||
err = p.validate(label)
|
||||
}
|
||||
}
|
||||
return strings.Join(labels, "."), nil
|
||||
if toASCII {
|
||||
for labels.reset(); !labels.done(); labels.next() {
|
||||
label := labels.label()
|
||||
if !ascii(label) {
|
||||
a, err2 := encode(acePrefix, label)
|
||||
if err == nil {
|
||||
err = err2
|
||||
}
|
||||
label = a
|
||||
labels.set(a)
|
||||
}
|
||||
n := len(label)
|
||||
if p.verifyDNSLength && err == nil && (n == 0 || n > 63) {
|
||||
err = &labelError{label, "A4"}
|
||||
}
|
||||
}
|
||||
}
|
||||
s = labels.result()
|
||||
if toASCII && p.verifyDNSLength && err == nil {
|
||||
// Compute the length of the domain name minus the root label and its dot.
|
||||
n := len(s)
|
||||
if n > 0 && s[n-1] == '.' {
|
||||
n--
|
||||
}
|
||||
if len(s) < 1 || n > 253 {
|
||||
err = &labelError{s, "A4"}
|
||||
}
|
||||
}
|
||||
return s, err
|
||||
}
|
||||
|
||||
// A labelIter allows iterating over domain name labels.
|
||||
type labelIter struct {
|
||||
orig string
|
||||
slice []string
|
||||
curStart int
|
||||
curEnd int
|
||||
i int
|
||||
}
|
||||
|
||||
func (l *labelIter) reset() {
|
||||
l.curStart = 0
|
||||
l.curEnd = 0
|
||||
l.i = 0
|
||||
}
|
||||
|
||||
func (l *labelIter) done() bool {
|
||||
return l.curStart >= len(l.orig)
|
||||
}
|
||||
|
||||
func (l *labelIter) result() string {
|
||||
if l.slice != nil {
|
||||
return strings.Join(l.slice, ".")
|
||||
}
|
||||
return l.orig
|
||||
}
|
||||
|
||||
func (l *labelIter) label() string {
|
||||
if l.slice != nil {
|
||||
return l.slice[l.i]
|
||||
}
|
||||
p := strings.IndexByte(l.orig[l.curStart:], '.')
|
||||
l.curEnd = l.curStart + p
|
||||
if p == -1 {
|
||||
l.curEnd = len(l.orig)
|
||||
}
|
||||
return l.orig[l.curStart:l.curEnd]
|
||||
}
|
||||
|
||||
// next sets the value to the next label. It skips the last label if it is empty.
|
||||
func (l *labelIter) next() {
|
||||
l.i++
|
||||
if l.slice != nil {
|
||||
if l.i >= len(l.slice) || l.i == len(l.slice)-1 && l.slice[l.i] == "" {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
} else {
|
||||
l.curStart = l.curEnd + 1
|
||||
if l.curStart == len(l.orig)-1 && l.orig[l.curStart] == '.' {
|
||||
l.curStart = len(l.orig)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (l *labelIter) set(s string) {
|
||||
if l.slice == nil {
|
||||
l.slice = strings.Split(l.orig, ".")
|
||||
}
|
||||
l.slice[l.i] = s
|
||||
}
|
||||
|
||||
// acePrefix is the ASCII Compatible Encoding prefix.
|
||||
const acePrefix = "xn--"
|
||||
|
||||
func (p *Profile) simplify(cat category) category {
|
||||
switch cat {
|
||||
case disallowedSTD3Mapped:
|
||||
if !p.ignoreSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = mapped
|
||||
}
|
||||
case disallowedSTD3Valid:
|
||||
if !p.ignoreSTD3Rules {
|
||||
cat = disallowed
|
||||
} else {
|
||||
cat = valid
|
||||
}
|
||||
case deviation:
|
||||
if !p.transitional {
|
||||
cat = valid
|
||||
}
|
||||
case validNV8, validXV8:
|
||||
// TODO: handle V2008
|
||||
cat = valid
|
||||
}
|
||||
return cat
|
||||
}
|
||||
|
||||
func (p *Profile) validateFromPunycode(s string) error {
|
||||
if !norm.NFC.IsNormalString(s) {
|
||||
return &labelError{s, "V1"}
|
||||
}
|
||||
for i := 0; i < len(s); {
|
||||
v, sz := trie.lookupString(s[i:])
|
||||
if c := p.simplify(info(v).category()); c != valid && c != deviation {
|
||||
return &labelError{s, "V6"}
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
zwnj = "\u200c"
|
||||
zwj = "\u200d"
|
||||
)
|
||||
|
||||
type joinState int8
|
||||
|
||||
const (
|
||||
stateStart joinState = iota
|
||||
stateVirama
|
||||
stateBefore
|
||||
stateBeforeVirama
|
||||
stateAfter
|
||||
stateFAIL
|
||||
)
|
||||
|
||||
var joinStates = [][numJoinTypes]joinState{
|
||||
stateStart: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateVirama,
|
||||
},
|
||||
stateVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
},
|
||||
stateBefore: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
joinZWNJ: stateAfter,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateBeforeVirama,
|
||||
},
|
||||
stateBeforeVirama: {
|
||||
joiningL: stateBefore,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateBefore,
|
||||
},
|
||||
stateAfter: {
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateBefore,
|
||||
joiningT: stateAfter,
|
||||
joiningR: stateStart,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateAfter, // no-op as we can't accept joiners here
|
||||
},
|
||||
stateFAIL: {
|
||||
0: stateFAIL,
|
||||
joiningL: stateFAIL,
|
||||
joiningD: stateFAIL,
|
||||
joiningT: stateFAIL,
|
||||
joiningR: stateFAIL,
|
||||
joinZWNJ: stateFAIL,
|
||||
joinZWJ: stateFAIL,
|
||||
joinVirama: stateFAIL,
|
||||
},
|
||||
}
|
||||
|
||||
// validate validates the criteria from Section 4.1. Item 1, 4, and 6 are
|
||||
// already implicitly satisfied by the overall implementation.
|
||||
func (p *Profile) validate(s string) error {
|
||||
if len(s) > 4 && s[2] == '-' && s[3] == '-' {
|
||||
return &labelError{s, "V2"}
|
||||
}
|
||||
if s[0] == '-' || s[len(s)-1] == '-' {
|
||||
return &labelError{s, "V3"}
|
||||
}
|
||||
// TODO: merge the use of this in the trie.
|
||||
v, sz := trie.lookupString(s)
|
||||
x := info(v)
|
||||
if x.isModifier() {
|
||||
return &labelError{s, "V5"}
|
||||
}
|
||||
if !bidirule.ValidString(s) {
|
||||
return &labelError{s, "B"}
|
||||
}
|
||||
// Quickly return in the absence of zero-width (non) joiners.
|
||||
if strings.Index(s, zwj) == -1 && strings.Index(s, zwnj) == -1 {
|
||||
return nil
|
||||
}
|
||||
st := stateStart
|
||||
for i := 0; ; {
|
||||
jt := x.joinType()
|
||||
if s[i:i+sz] == zwj {
|
||||
jt = joinZWJ
|
||||
} else if s[i:i+sz] == zwnj {
|
||||
jt = joinZWNJ
|
||||
}
|
||||
st = joinStates[st][jt]
|
||||
if x.isViramaModifier() {
|
||||
st = joinStates[st][joinVirama]
|
||||
}
|
||||
if i += sz; i == len(s) {
|
||||
break
|
||||
}
|
||||
v, sz = trie.lookupString(s[i:])
|
||||
x = info(v)
|
||||
}
|
||||
if st == stateFAIL || st == stateAfter {
|
||||
return &labelError{s, "C"}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func ascii(s string) bool {
|
||||
|
||||
23
vendor/golang.org/x/net/idna/punycode.go
generated
vendored
23
vendor/golang.org/x/net/idna/punycode.go
generated
vendored
@@ -1,4 +1,6 @@
|
||||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Copied from the golang.org/x/text repo; DO NOT EDIT
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
@@ -7,7 +9,6 @@ package idna
|
||||
// This file implements the Punycode algorithm from RFC 3492.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
@@ -27,6 +28,8 @@ const (
|
||||
tmin int32 = 1
|
||||
)
|
||||
|
||||
func punyError(s string) error { return &labelError{s, "A3"} }
|
||||
|
||||
// decode decodes a string as specified in section 6.2.
|
||||
func decode(encoded string) (string, error) {
|
||||
if encoded == "" {
|
||||
@@ -34,7 +37,7 @@ func decode(encoded string) (string, error) {
|
||||
}
|
||||
pos := 1 + strings.LastIndex(encoded, "-")
|
||||
if pos == 1 {
|
||||
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
if pos == len(encoded) {
|
||||
return encoded[:len(encoded)-1], nil
|
||||
@@ -50,16 +53,16 @@ func decode(encoded string) (string, error) {
|
||||
oldI, w := i, int32(1)
|
||||
for k := base; ; k += base {
|
||||
if pos == len(encoded) {
|
||||
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
digit, ok := decodeDigit(encoded[pos])
|
||||
if !ok {
|
||||
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
pos++
|
||||
i += digit * w
|
||||
if i < 0 {
|
||||
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
t := k - bias
|
||||
if t < tmin {
|
||||
@@ -72,7 +75,7 @@ func decode(encoded string) (string, error) {
|
||||
}
|
||||
w *= base - t
|
||||
if w >= math.MaxInt32/base {
|
||||
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
}
|
||||
x := int32(len(output) + 1)
|
||||
@@ -80,7 +83,7 @@ func decode(encoded string) (string, error) {
|
||||
n += i / x
|
||||
i %= x
|
||||
if n > utf8.MaxRune || len(output) >= 1024 {
|
||||
return "", fmt.Errorf("idna: invalid label %q", encoded)
|
||||
return "", punyError(encoded)
|
||||
}
|
||||
output = append(output, 0)
|
||||
copy(output[i+1:], output[i:])
|
||||
@@ -121,14 +124,14 @@ func encode(prefix, s string) (string, error) {
|
||||
}
|
||||
delta += (m - n) * (h + 1)
|
||||
if delta < 0 {
|
||||
return "", fmt.Errorf("idna: invalid label %q", s)
|
||||
return "", punyError(s)
|
||||
}
|
||||
n = m
|
||||
for _, r := range s {
|
||||
if r < n {
|
||||
delta++
|
||||
if delta < 0 {
|
||||
return "", fmt.Errorf("idna: invalid label %q", s)
|
||||
return "", punyError(s)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
4479
vendor/golang.org/x/net/idna/tables.go
generated
vendored
Normal file
4479
vendor/golang.org/x/net/idna/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
71
vendor/golang.org/x/net/idna/trie.go
generated
vendored
Normal file
71
vendor/golang.org/x/net/idna/trie.go
generated
vendored
Normal file
@@ -0,0 +1,71 @@
|
||||
// Copied from the golang.org/x/text repo; DO NOT EDIT
|
||||
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package idna
|
||||
|
||||
// appendMapping appends the mapping for the respective rune. isMapped must be
|
||||
// true. A mapping is a categorization of a rune as defined in UTS #46.
|
||||
func (c info) appendMapping(b []byte, s string) []byte {
|
||||
index := int(c >> indexShift)
|
||||
if c&xorBit == 0 {
|
||||
s := mappings[index:]
|
||||
return append(b, s[1:s[0]+1]...)
|
||||
}
|
||||
b = append(b, s...)
|
||||
if c&inlineXOR == inlineXOR {
|
||||
// TODO: support and handle two-byte inline masks
|
||||
b[len(b)-1] ^= byte(index)
|
||||
} else {
|
||||
for p := len(b) - int(xorData[index]); p < len(b); p++ {
|
||||
index++
|
||||
b[p] ^= xorData[index]
|
||||
}
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Sparse block handling code.
|
||||
|
||||
type valueRange struct {
|
||||
value uint16 // header: value:stride
|
||||
lo, hi byte // header: lo:n
|
||||
}
|
||||
|
||||
type sparseBlocks struct {
|
||||
values []valueRange
|
||||
offset []uint16
|
||||
}
|
||||
|
||||
var idnaSparse = sparseBlocks{
|
||||
values: idnaSparseValues[:],
|
||||
offset: idnaSparseOffset[:],
|
||||
}
|
||||
|
||||
var trie = newIdnaTrie(0)
|
||||
|
||||
// lookup determines the type of block n and looks up the value for b.
|
||||
// For n < t.cutoff, the block is a simple lookup table. Otherwise, the block
|
||||
// is a list of ranges with an accompanying value. Given a matching range r,
|
||||
// the value for b is by r.value + (b - r.lo) * stride.
|
||||
func (t *sparseBlocks) lookup(n uint32, b byte) uint16 {
|
||||
offset := t.offset[n]
|
||||
header := t.values[offset]
|
||||
lo := offset + 1
|
||||
hi := lo + uint16(header.lo)
|
||||
for lo < hi {
|
||||
m := lo + (hi-lo)/2
|
||||
r := t.values[m]
|
||||
if r.lo <= b && b <= r.hi {
|
||||
return r.value + uint16(b-r.lo)*header.value
|
||||
}
|
||||
if b < r.lo {
|
||||
hi = m
|
||||
} else {
|
||||
lo = m + 1
|
||||
}
|
||||
}
|
||||
return 0
|
||||
}
|
||||
116
vendor/golang.org/x/net/idna/trieval.go
generated
vendored
Normal file
116
vendor/golang.org/x/net/idna/trieval.go
generated
vendored
Normal file
@@ -0,0 +1,116 @@
|
||||
// Copied from the golang.org/x/text repo; DO NOT EDIT
|
||||
|
||||
// This file was generated by go generate; DO NOT EDIT
|
||||
|
||||
package idna
|
||||
|
||||
// This file contains definitions for interpreting the trie value of the idna
|
||||
// trie generated by "go run gen*.go". It is shared by both the generator
|
||||
// program and the resultant package. Sharing is achieved by the generator
|
||||
// copying gen_trieval.go to trieval.go and changing what's above this comment.
|
||||
|
||||
// info holds information from the IDNA mapping table for a single rune. It is
|
||||
// the value returned by a trie lookup. In most cases, all information fits in
|
||||
// a 16-bit value. For mappings, this value may contain an index into a slice
|
||||
// with the mapped string. Such mappings can consist of the actual mapped value
|
||||
// or an XOR pattern to be applied to the bytes of the UTF8 encoding of the
|
||||
// input rune. This technique is used by the cases packages and reduces the
|
||||
// table size significantly.
|
||||
//
|
||||
// The per-rune values have the following format:
|
||||
//
|
||||
// if mapped {
|
||||
// if inlinedXOR {
|
||||
// 15..13 inline XOR marker
|
||||
// 12..11 unused
|
||||
// 10..3 inline XOR mask
|
||||
// } else {
|
||||
// 15..3 index into xor or mapping table
|
||||
// }
|
||||
// } else {
|
||||
// 15..13 unused
|
||||
// 12 modifier (including virama)
|
||||
// 11 virama modifier
|
||||
// 10..8 joining type
|
||||
// 7..3 category type
|
||||
// }
|
||||
// 2 use xor pattern
|
||||
// 1..0 mapped category
|
||||
//
|
||||
// See the definitions below for a more detailed description of the various
|
||||
// bits.
|
||||
type info uint16
|
||||
|
||||
const (
|
||||
catSmallMask = 0x3
|
||||
catBigMask = 0xF8
|
||||
indexShift = 3
|
||||
xorBit = 0x4 // interpret the index as an xor pattern
|
||||
inlineXOR = 0xE000 // These bits are set if the XOR pattern is inlined.
|
||||
|
||||
joinShift = 8
|
||||
joinMask = 0x07
|
||||
|
||||
viramaModifier = 0x0800
|
||||
modifier = 0x1000
|
||||
)
|
||||
|
||||
// A category corresponds to a category defined in the IDNA mapping table.
|
||||
type category uint16
|
||||
|
||||
const (
|
||||
unknown category = 0 // not defined currently in unicode.
|
||||
mapped category = 1
|
||||
disallowedSTD3Mapped category = 2
|
||||
deviation category = 3
|
||||
)
|
||||
|
||||
const (
|
||||
valid category = 0x08
|
||||
validNV8 category = 0x18
|
||||
validXV8 category = 0x28
|
||||
disallowed category = 0x40
|
||||
disallowedSTD3Valid category = 0x80
|
||||
ignored category = 0xC0
|
||||
)
|
||||
|
||||
// join types and additional rune information
|
||||
const (
|
||||
joiningL = (iota + 1)
|
||||
joiningD
|
||||
joiningT
|
||||
joiningR
|
||||
|
||||
//the following types are derived during processing
|
||||
joinZWJ
|
||||
joinZWNJ
|
||||
joinVirama
|
||||
numJoinTypes
|
||||
)
|
||||
|
||||
func (c info) isMapped() bool {
|
||||
return c&0x3 != 0
|
||||
}
|
||||
|
||||
func (c info) category() category {
|
||||
small := c & catSmallMask
|
||||
if small != 0 {
|
||||
return category(small)
|
||||
}
|
||||
return category(c & catBigMask)
|
||||
}
|
||||
|
||||
func (c info) joinType() info {
|
||||
if c.isMapped() {
|
||||
return 0
|
||||
}
|
||||
return (c >> joinShift) & joinMask
|
||||
}
|
||||
|
||||
func (c info) isModifier() bool {
|
||||
return c&(modifier|catSmallMask) == modifier
|
||||
}
|
||||
|
||||
func (c info) isViramaModifier() bool {
|
||||
return c&(viramaModifier|catSmallMask) == viramaModifier
|
||||
}
|
||||
27
vendor/golang.org/x/text/LICENSE
generated
vendored
Normal file
27
vendor/golang.org/x/text/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,27 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
22
vendor/golang.org/x/text/PATENTS
generated
vendored
Normal file
22
vendor/golang.org/x/text/PATENTS
generated
vendored
Normal file
@@ -0,0 +1,22 @@
|
||||
Additional IP Rights Grant (Patents)
|
||||
|
||||
"This implementation" means the copyrightable works distributed by
|
||||
Google as part of the Go project.
|
||||
|
||||
Google hereby grants to You a perpetual, worldwide, non-exclusive,
|
||||
no-charge, royalty-free, irrevocable (except as stated in this section)
|
||||
patent license to make, have made, use, offer to sell, sell, import,
|
||||
transfer and otherwise run, modify and propagate the contents of this
|
||||
implementation of Go, where such license applies only to those patent
|
||||
claims, both currently owned or controlled by Google and acquired in
|
||||
the future, licensable by Google that are necessarily infringed by this
|
||||
implementation of Go. This grant does not include claims that would be
|
||||
infringed only as a consequence of further modification of this
|
||||
implementation. If you or your agent or exclusive licensee institute or
|
||||
order or agree to the institution of patent litigation against any
|
||||
entity (including a cross-claim or counterclaim in a lawsuit) alleging
|
||||
that this implementation of Go or any code incorporated within this
|
||||
implementation of Go constitutes direct or contributory patent
|
||||
infringement, or inducement of patent infringement, then any patent
|
||||
rights granted to you under this License for this implementation of Go
|
||||
shall terminate as of the date such litigation is filed.
|
||||
351
vendor/golang.org/x/text/internal/gen/code.go
generated
vendored
Normal file
351
vendor/golang.org/x/text/internal/gen/code.go
generated
vendored
Normal file
@@ -0,0 +1,351 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package gen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"hash"
|
||||
"hash/fnv"
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// This file contains utilities for generating code.
|
||||
|
||||
// TODO: other write methods like:
|
||||
// - slices, maps, types, etc.
|
||||
|
||||
// CodeWriter is a utility for writing structured code. It computes the content
|
||||
// hash and size of written content. It ensures there are newlines between
|
||||
// written code blocks.
|
||||
type CodeWriter struct {
|
||||
buf bytes.Buffer
|
||||
Size int
|
||||
Hash hash.Hash32 // content hash
|
||||
gob *gob.Encoder
|
||||
// For comments we skip the usual one-line separator if they are followed by
|
||||
// a code block.
|
||||
skipSep bool
|
||||
}
|
||||
|
||||
func (w *CodeWriter) Write(p []byte) (n int, err error) {
|
||||
return w.buf.Write(p)
|
||||
}
|
||||
|
||||
// NewCodeWriter returns a new CodeWriter.
|
||||
func NewCodeWriter() *CodeWriter {
|
||||
h := fnv.New32()
|
||||
return &CodeWriter{Hash: h, gob: gob.NewEncoder(h)}
|
||||
}
|
||||
|
||||
// WriteGoFile appends the buffer with the total size of all created structures
|
||||
// and writes it as a Go file to the the given file with the given package name.
|
||||
func (w *CodeWriter) WriteGoFile(filename, pkg string) {
|
||||
f, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer f.Close()
|
||||
if _, err = w.WriteGo(f, pkg); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteGo appends the buffer with the total size of all created structures and
|
||||
// writes it as a Go file to the the given writer with the given package name.
|
||||
func (w *CodeWriter) WriteGo(out io.Writer, pkg string) (n int, err error) {
|
||||
sz := w.Size
|
||||
w.WriteComment("Total table size %d bytes (%dKiB); checksum: %X\n", sz, sz/1024, w.Hash.Sum32())
|
||||
defer w.buf.Reset()
|
||||
return WriteGo(out, pkg, w.buf.Bytes())
|
||||
}
|
||||
|
||||
func (w *CodeWriter) printf(f string, x ...interface{}) {
|
||||
fmt.Fprintf(w, f, x...)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) insertSep() {
|
||||
if w.skipSep {
|
||||
w.skipSep = false
|
||||
return
|
||||
}
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n")
|
||||
}
|
||||
|
||||
// WriteComment writes a comment block. All line starts are prefixed with "//".
|
||||
// Initial empty lines are gobbled. The indentation for the first line is
|
||||
// stripped from consecutive lines.
|
||||
func (w *CodeWriter) WriteComment(comment string, args ...interface{}) {
|
||||
s := fmt.Sprintf(comment, args...)
|
||||
s = strings.Trim(s, "\n")
|
||||
|
||||
// Use at least two newlines to ensure a blank space between the previous
|
||||
// block. WriteGoFile will remove extraneous newlines.
|
||||
w.printf("\n\n// ")
|
||||
w.skipSep = true
|
||||
|
||||
// strip first indent level.
|
||||
sep := "\n"
|
||||
for ; len(s) > 0 && (s[0] == '\t' || s[0] == ' '); s = s[1:] {
|
||||
sep += s[:1]
|
||||
}
|
||||
|
||||
strings.NewReplacer(sep, "\n// ", "\n", "\n// ").WriteString(w, s)
|
||||
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSizeInfo(size int) {
|
||||
w.printf("// Size: %d bytes\n", size)
|
||||
}
|
||||
|
||||
// WriteConst writes a constant of the given name and value.
|
||||
func (w *CodeWriter) WriteConst(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("const %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
w.printf("\n")
|
||||
default:
|
||||
w.printf("const %s = %#v\n", name, x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteVar writes a variable of the given name and value.
|
||||
func (w *CodeWriter) WriteVar(name string, x interface{}) {
|
||||
w.insertSep()
|
||||
v := reflect.ValueOf(x)
|
||||
oldSize := w.Size
|
||||
sz := int(v.Type().Size())
|
||||
w.Size += sz
|
||||
|
||||
switch v.Type().Kind() {
|
||||
case reflect.String:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.WriteString(v.String())
|
||||
case reflect.Struct:
|
||||
w.gob.Encode(x)
|
||||
fallthrough
|
||||
case reflect.Slice, reflect.Array:
|
||||
w.printf("var %s = ", name)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
default:
|
||||
w.printf("var %s %s = ", name, typeName(x))
|
||||
w.gob.Encode(x)
|
||||
w.writeValue(v)
|
||||
w.writeSizeInfo(w.Size - oldSize)
|
||||
}
|
||||
w.printf("\n")
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeValue(v reflect.Value) {
|
||||
x := v.Interface()
|
||||
switch v.Kind() {
|
||||
case reflect.String:
|
||||
w.WriteString(v.String())
|
||||
case reflect.Array:
|
||||
// Don't double count: callers of WriteArray count on the size being
|
||||
// added, so we need to discount it here.
|
||||
w.Size -= int(v.Type().Size())
|
||||
w.writeSlice(x, true)
|
||||
case reflect.Slice:
|
||||
w.writeSlice(x, false)
|
||||
case reflect.Struct:
|
||||
w.printf("%s{\n", typeName(v.Interface()))
|
||||
t := v.Type()
|
||||
for i := 0; i < v.NumField(); i++ {
|
||||
w.printf("%s: ", t.Field(i).Name)
|
||||
w.writeValue(v.Field(i))
|
||||
w.printf(",\n")
|
||||
}
|
||||
w.printf("}")
|
||||
default:
|
||||
w.printf("%#v", x)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteString writes a string literal.
|
||||
func (w *CodeWriter) WriteString(s string) {
|
||||
s = strings.Replace(s, `\`, `\\`, -1)
|
||||
io.WriteString(w.Hash, s) // content hash
|
||||
w.Size += len(s)
|
||||
|
||||
const maxInline = 40
|
||||
if len(s) <= maxInline {
|
||||
w.printf("%q", s)
|
||||
return
|
||||
}
|
||||
|
||||
// We will render the string as a multi-line string.
|
||||
const maxWidth = 80 - 4 - len(`"`) - len(`" +`)
|
||||
|
||||
// When starting on its own line, go fmt indents line 2+ an extra level.
|
||||
n, max := maxWidth, maxWidth-4
|
||||
|
||||
// As per https://golang.org/issue/18078, the compiler has trouble
|
||||
// compiling the concatenation of many strings, s0 + s1 + s2 + ... + sN,
|
||||
// for large N. We insert redundant, explicit parentheses to work around
|
||||
// that, lowering the N at any given step: (s0 + s1 + ... + s63) + (s64 +
|
||||
// ... + s127) + etc + (etc + ... + sN).
|
||||
explicitParens, extraComment := len(s) > 128*1024, ""
|
||||
if explicitParens {
|
||||
w.printf(`(`)
|
||||
extraComment = "; the redundant, explicit parens are for https://golang.org/issue/18078"
|
||||
}
|
||||
|
||||
// Print "" +\n, if a string does not start on its own line.
|
||||
b := w.buf.Bytes()
|
||||
if p := len(bytes.TrimRight(b, " \t")); p > 0 && b[p-1] != '\n' {
|
||||
w.printf("\"\" + // Size: %d bytes%s\n", len(s), extraComment)
|
||||
n, max = maxWidth, maxWidth
|
||||
}
|
||||
|
||||
w.printf(`"`)
|
||||
|
||||
for sz, p, nLines := 0, 0, 0; p < len(s); {
|
||||
var r rune
|
||||
r, sz = utf8.DecodeRuneInString(s[p:])
|
||||
out := s[p : p+sz]
|
||||
chars := 1
|
||||
if !unicode.IsPrint(r) || r == utf8.RuneError || r == '"' {
|
||||
switch sz {
|
||||
case 1:
|
||||
out = fmt.Sprintf("\\x%02x", s[p])
|
||||
case 2, 3:
|
||||
out = fmt.Sprintf("\\u%04x", r)
|
||||
case 4:
|
||||
out = fmt.Sprintf("\\U%08x", r)
|
||||
}
|
||||
chars = len(out)
|
||||
}
|
||||
if n -= chars; n < 0 {
|
||||
nLines++
|
||||
if explicitParens && nLines&63 == 63 {
|
||||
w.printf("\") + (\"")
|
||||
}
|
||||
w.printf("\" +\n\"")
|
||||
n = max - len(out)
|
||||
}
|
||||
w.printf("%s", out)
|
||||
p += sz
|
||||
}
|
||||
w.printf(`"`)
|
||||
if explicitParens {
|
||||
w.printf(`)`)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteSlice writes a slice value.
|
||||
func (w *CodeWriter) WriteSlice(x interface{}) {
|
||||
w.writeSlice(x, false)
|
||||
}
|
||||
|
||||
// WriteArray writes an array value.
|
||||
func (w *CodeWriter) WriteArray(x interface{}) {
|
||||
w.writeSlice(x, true)
|
||||
}
|
||||
|
||||
func (w *CodeWriter) writeSlice(x interface{}, isArray bool) {
|
||||
v := reflect.ValueOf(x)
|
||||
w.gob.Encode(v.Len())
|
||||
w.Size += v.Len() * int(v.Type().Elem().Size())
|
||||
name := typeName(x)
|
||||
if isArray {
|
||||
name = fmt.Sprintf("[%d]%s", v.Len(), name[strings.Index(name, "]")+1:])
|
||||
}
|
||||
if isArray {
|
||||
w.printf("%s{\n", name)
|
||||
} else {
|
||||
w.printf("%s{ // %d elements\n", name, v.Len())
|
||||
}
|
||||
|
||||
switch kind := v.Type().Elem().Kind(); kind {
|
||||
case reflect.String:
|
||||
for _, s := range x.([]string) {
|
||||
w.WriteString(s)
|
||||
w.printf(",\n")
|
||||
}
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
// nLine and nBlock are the number of elements per line and block.
|
||||
nLine, nBlock, format := 8, 64, "%d,"
|
||||
switch kind {
|
||||
case reflect.Uint8:
|
||||
format = "%#02x,"
|
||||
case reflect.Uint16:
|
||||
format = "%#04x,"
|
||||
case reflect.Uint32:
|
||||
nLine, nBlock, format = 4, 32, "%#08x,"
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
nLine, nBlock, format = 4, 32, "%#016x,"
|
||||
case reflect.Int8:
|
||||
nLine = 16
|
||||
}
|
||||
n := nLine
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
if i%nBlock == 0 && v.Len() > nBlock {
|
||||
w.printf("// Entry %X - %X\n", i, i+nBlock-1)
|
||||
}
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.Encode(x)
|
||||
w.printf(format, x)
|
||||
if n--; n == 0 {
|
||||
n = nLine
|
||||
w.printf("\n")
|
||||
}
|
||||
}
|
||||
w.printf("\n")
|
||||
case reflect.Struct:
|
||||
zero := reflect.Zero(v.Type().Elem()).Interface()
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
x := v.Index(i).Interface()
|
||||
w.gob.EncodeValue(v)
|
||||
if !reflect.DeepEqual(zero, x) {
|
||||
line := fmt.Sprintf("%#v,\n", x)
|
||||
line = line[strings.IndexByte(line, '{'):]
|
||||
w.printf("%d: ", i)
|
||||
w.printf(line)
|
||||
}
|
||||
}
|
||||
case reflect.Array:
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
w.printf("%d: %#v,\n", i, v.Index(i).Interface())
|
||||
}
|
||||
default:
|
||||
panic("gen: slice elem type not supported")
|
||||
}
|
||||
w.printf("}")
|
||||
}
|
||||
|
||||
// WriteType writes a definition of the type of the given value and returns the
|
||||
// type name.
|
||||
func (w *CodeWriter) WriteType(x interface{}) string {
|
||||
t := reflect.TypeOf(x)
|
||||
w.printf("type %s struct {\n", t.Name())
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
w.printf("\t%s %s\n", t.Field(i).Name, t.Field(i).Type)
|
||||
}
|
||||
w.printf("}\n")
|
||||
return t.Name()
|
||||
}
|
||||
|
||||
// typeName returns the name of the go type of x.
|
||||
func typeName(x interface{}) string {
|
||||
t := reflect.ValueOf(x).Type()
|
||||
return strings.Replace(fmt.Sprint(t), "main.", "", 1)
|
||||
}
|
||||
281
vendor/golang.org/x/text/internal/gen/gen.go
generated
vendored
Normal file
281
vendor/golang.org/x/text/internal/gen/gen.go
generated
vendored
Normal file
@@ -0,0 +1,281 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package gen contains common code for the various code generation tools in the
|
||||
// text repository. Its usage ensures consistency between tools.
|
||||
//
|
||||
// This package defines command line flags that are common to most generation
|
||||
// tools. The flags allow for specifying specific Unicode and CLDR versions
|
||||
// in the public Unicode data repository (http://www.unicode.org/Public).
|
||||
//
|
||||
// A local Unicode data mirror can be set through the flag -local or the
|
||||
// environment variable UNICODE_DIR. The former takes precedence. The local
|
||||
// directory should follow the same structure as the public repository.
|
||||
//
|
||||
// IANA data can also optionally be mirrored by putting it in the iana directory
|
||||
// rooted at the top of the local mirror. Beware, though, that IANA data is not
|
||||
// versioned. So it is up to the developer to use the right version.
|
||||
package gen // import "golang.org/x/text/internal/gen"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"go/format"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/unicode/cldr"
|
||||
)
|
||||
|
||||
// Command-line flags shared by all generation tools. The Unicode and CLDR
// version defaults may be overridden via the UNICODE_VERSION and CLDR_VERSION
// environment variables (see getEnv).
var (
	url = flag.String("url",
		"http://www.unicode.org/Public",
		"URL of Unicode database directory")
	iana = flag.String("iana",
		"http://www.iana.org",
		"URL of the IANA repository")
	unicodeVersion = flag.String("unicode",
		getEnv("UNICODE_VERSION", unicode.Version),
		"unicode version to use")
	cldrVersion = flag.String("cldr",
		getEnv("CLDR_VERSION", cldr.Version),
		"cldr version to use")
)
|
||||
|
||||
// getEnv returns the value of the environment variable name, or def if the
// variable is unset or empty.
func getEnv(name, def string) string {
	v := os.Getenv(name)
	if v == "" {
		return def
	}
	return v
}
|
||||
|
||||
// Init performs common initialization for a gen command. It parses the flags
// and sets up the standard logging parameters.
func Init() {
	log.SetFlags(log.Lshortfile)
	log.SetPrefix("")
	flag.Parse()
}
|
||||
|
||||
// header is the comment and package clause prepended to every generated Go
// file; the %s verb receives the package name (see WriteGo).
const header = `// This file was generated by go generate; DO NOT EDIT

package %s

`
|
||||
|
||||
// UnicodeVersion reports the requested Unicode version.
|
||||
func UnicodeVersion() string {
|
||||
return *unicodeVersion
|
||||
}
|
||||
|
||||
// UnicodeVersion reports the requested CLDR version.
|
||||
func CLDRVersion() string {
|
||||
return *cldrVersion
|
||||
}
|
||||
|
||||
// IsLocal reports whether data files are available locally.
|
||||
func IsLocal() bool {
|
||||
dir, err := localReadmeFile()
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
if _, err = os.Stat(dir); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// OpenUCDFile opens the requested UCD file. The file is specified relative to
|
||||
// the public Unicode root directory. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenUCDFile(file string) io.ReadCloser {
|
||||
return openUnicode(path.Join(*unicodeVersion, "ucd", file))
|
||||
}
|
||||
|
||||
// OpenCLDRCoreZip opens the CLDR core zip file. It will call log.Fatal if there
|
||||
// are any errors.
|
||||
func OpenCLDRCoreZip() io.ReadCloser {
|
||||
return OpenUnicodeFile("cldr", *cldrVersion, "core.zip")
|
||||
}
|
||||
|
||||
// OpenUnicodeFile opens the requested file of the requested category from the
|
||||
// root of the Unicode data archive. The file is specified relative to the
|
||||
// public Unicode root directory. If version is "", it will use the default
|
||||
// Unicode version. It will call log.Fatal if there are any errors.
|
||||
func OpenUnicodeFile(category, version, file string) io.ReadCloser {
|
||||
if version == "" {
|
||||
version = UnicodeVersion()
|
||||
}
|
||||
return openUnicode(path.Join(category, version, file))
|
||||
}
|
||||
|
||||
// OpenIANAFile opens the requested IANA file. The file is specified relative
|
||||
// to the IANA root, which is typically either http://www.iana.org or the
|
||||
// iana directory in the local mirror. It will call log.Fatal if there are any
|
||||
// errors.
|
||||
func OpenIANAFile(path string) io.ReadCloser {
|
||||
return Open(*iana, "iana", path)
|
||||
}
|
||||
|
||||
var (
	// dirMutex serializes lazy creation of the local data directory
	// (see getLocalDir).
	dirMutex sync.Mutex
	localDir string
)

// permissions is the file mode used for all created directories and files.
const permissions = 0755
|
||||
|
||||
// localReadmeFile returns the path of the README marker file of the local
// Unicode data mirror: <golang.org/x/text root>/DATA/README. It returns an
// error if the x/text package cannot be located.
func localReadmeFile() (string, error) {
	p, err := build.Import("golang.org/x/text", "", build.FindOnly)
	if err != nil {
		// Error strings are lowercase per Go convention.
		return "", fmt.Errorf("could not locate package: %v", err)
	}
	return filepath.Join(p.Dir, "DATA", "README"), nil
}
|
||||
|
||||
func getLocalDir() string {
|
||||
dirMutex.Lock()
|
||||
defer dirMutex.Unlock()
|
||||
|
||||
readme, err := localReadmeFile()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
dir := filepath.Dir(readme)
|
||||
if _, err := os.Stat(readme); err != nil {
|
||||
if err := os.MkdirAll(dir, permissions); err != nil {
|
||||
log.Fatalf("Could not create directory: %v", err)
|
||||
}
|
||||
ioutil.WriteFile(readme, []byte(readmeTxt), permissions)
|
||||
}
|
||||
return dir
|
||||
}
|
||||
|
||||
// readmeTxt is written to DATA/README when the local mirror directory is
// first created (see getLocalDir).
const readmeTxt = `Generated by golang.org/x/text/internal/gen. DO NOT EDIT.

This directory contains downloaded files used to generate the various tables
in the golang.org/x/text subrepo.

Note that the language subtag repo (iana/assignments/language-subtag-registry)
and all other times in the iana subdirectory are not versioned and will need
to be periodically manually updated. The easiest way to do this is to remove
the entire iana directory. This is mostly of concern when updating the language
package.
`
|
||||
|
||||
// Open opens subdir/path if a local directory is specified and the file exists,
|
||||
// where subdir is a directory relative to the local root, or fetches it from
|
||||
// urlRoot/path otherwise. It will call log.Fatal if there are any errors.
|
||||
func Open(urlRoot, subdir, path string) io.ReadCloser {
|
||||
file := filepath.Join(getLocalDir(), subdir, filepath.FromSlash(path))
|
||||
return open(file, urlRoot, path)
|
||||
}
|
||||
|
||||
func openUnicode(path string) io.ReadCloser {
|
||||
file := filepath.Join(getLocalDir(), filepath.FromSlash(path))
|
||||
return open(file, *url, path)
|
||||
}
|
||||
|
||||
// TODO: automatically periodically update non-versioned files.
|
||||
|
||||
func open(file, urlRoot, path string) io.ReadCloser {
|
||||
if f, err := os.Open(file); err == nil {
|
||||
return f
|
||||
}
|
||||
r := get(urlRoot, path)
|
||||
defer r.Close()
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not download file: %v", err)
|
||||
}
|
||||
os.MkdirAll(filepath.Dir(file), permissions)
|
||||
if err := ioutil.WriteFile(file, b, permissions); err != nil {
|
||||
log.Fatalf("Could not create file: %v", err)
|
||||
}
|
||||
return ioutil.NopCloser(bytes.NewReader(b))
|
||||
}
|
||||
|
||||
// get fetches root/path over HTTP and returns the response body. Any failure,
// including a non-200 status, is fatal.
func get(root, path string) io.ReadCloser {
	target := root + "/" + path
	fmt.Printf("Fetching %s...", target)
	defer fmt.Println(" done.")
	resp, err := http.Get(target)
	if err != nil {
		log.Fatalf("HTTP GET: %v", err)
	}
	if resp.StatusCode != http.StatusOK {
		log.Fatalf("Bad GET status for %q: %q", target, resp.Status)
	}
	return resp.Body
}
|
||||
|
||||
// TODO: use Write*Version in all applicable packages.
|
||||
|
||||
// WriteUnicodeVersion writes a constant for the Unicode version from which the
|
||||
// tables are generated.
|
||||
func WriteUnicodeVersion(w io.Writer) {
|
||||
fmt.Fprintf(w, "// UnicodeVersion is the Unicode version from which the tables in this package are derived.\n")
|
||||
fmt.Fprintf(w, "const UnicodeVersion = %q\n\n", UnicodeVersion())
|
||||
}
|
||||
|
||||
// WriteCLDRVersion writes a constant for the CLDR version from which the
|
||||
// tables are generated.
|
||||
func WriteCLDRVersion(w io.Writer) {
|
||||
fmt.Fprintf(w, "// CLDRVersion is the CLDR version from which the tables in this package are derived.\n")
|
||||
fmt.Fprintf(w, "const CLDRVersion = %q\n\n", CLDRVersion())
|
||||
}
|
||||
|
||||
// WriteGoFile prepends a standard file comment and package statement to the
|
||||
// given bytes, applies gofmt, and writes them to a file with the given name.
|
||||
// It will call log.Fatal if there are any errors.
|
||||
func WriteGoFile(filename, pkg string, b []byte) {
|
||||
w, err := os.Create(filename)
|
||||
if err != nil {
|
||||
log.Fatalf("Could not create file %s: %v", filename, err)
|
||||
}
|
||||
defer w.Close()
|
||||
if _, err = WriteGo(w, pkg, b); err != nil {
|
||||
log.Fatalf("Error writing file %s: %v", filename, err)
|
||||
}
|
||||
}
|
||||
|
||||
// WriteGo prepends a standard file comment and package statement to the given
|
||||
// bytes, applies gofmt, and writes them to w.
|
||||
func WriteGo(w io.Writer, pkg string, b []byte) (n int, err error) {
|
||||
src := []byte(fmt.Sprintf(header, pkg))
|
||||
src = append(src, b...)
|
||||
formatted, err := format.Source(src)
|
||||
if err != nil {
|
||||
// Print the generated code even in case of an error so that the
|
||||
// returned error can be meaningfully interpreted.
|
||||
n, _ = w.Write(src)
|
||||
return n, err
|
||||
}
|
||||
return w.Write(formatted)
|
||||
}
|
||||
|
||||
// Repackage rewrites a Go file from belonging to package main to belonging to
|
||||
// the given package.
|
||||
func Repackage(inFile, outFile, pkg string) {
|
||||
src, err := ioutil.ReadFile(inFile)
|
||||
if err != nil {
|
||||
log.Fatalf("reading %s: %v", inFile, err)
|
||||
}
|
||||
const toDelete = "package main\n\n"
|
||||
i := bytes.Index(src, []byte(toDelete))
|
||||
if i < 0 {
|
||||
log.Fatalf("Could not find %q in %s.", toDelete, inFile)
|
||||
}
|
||||
w := &bytes.Buffer{}
|
||||
w.Write(src[i+len(toDelete):])
|
||||
WriteGoFile(outFile, pkg, w.Bytes())
|
||||
}
|
||||
58
vendor/golang.org/x/text/internal/triegen/compact.go
generated
vendored
Normal file
58
vendor/golang.org/x/text/internal/triegen/compact.go
generated
vendored
Normal file
@@ -0,0 +1,58 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package triegen
|
||||
|
||||
// This file defines Compacter and its implementations.
|
||||
|
||||
import "io"
|
||||
|
||||
// A Compacter generates an alternative, more space-efficient way to store a
// trie value block. A trie value block holds all possible values for the last
// byte of a UTF-8 encoded rune. Excluding ASCII characters, a trie value block
// always has 64 values, as a UTF-8 encoding ends with a byte in [0x80, 0xC0).
type Compacter interface {
	// Size returns whether the Compacter could encode the given block as well
	// as its size in case it can. len(v) is always 64.
	Size(v []uint64) (sz int, ok bool)

	// Store stores the block using the Compacter's compression method.
	// It returns a handle with which the block can be retrieved.
	// len(v) is always 64.
	Store(v []uint64) uint32

	// Print writes the data structures associated to the given store to w.
	Print(w io.Writer) error

	// Handler returns the name of a function that gets called during trie
	// lookup for blocks generated by the Compacter. The function should be of
	// the form func (n uint32, b byte) uint64, where n is the index returned by
	// the Compacter's Store method and b is the last byte of the UTF-8
	// encoding, where 0x80 <= b < 0xC0, for which to do the lookup in the
	// block.
	Handler() string
}
|
||||
|
||||
// simpleCompacter is the default Compacter used by builder. It implements a
// normal trie block: stored blocks are appended verbatim to the builder's
// ValueBlocks.
type simpleCompacter builder
|
||||
|
||||
func (b *simpleCompacter) Size([]uint64) (sz int, ok bool) {
|
||||
return blockSize * b.ValueSize, true
|
||||
}
|
||||
|
||||
func (b *simpleCompacter) Store(v []uint64) uint32 {
|
||||
h := uint32(len(b.ValueBlocks) - blockOffset)
|
||||
b.ValueBlocks = append(b.ValueBlocks, v)
|
||||
return h
|
||||
}
|
||||
|
||||
// Print writes nothing: the blocks a simpleCompacter stores are emitted with
// the rest of the trie tables.
func (b *simpleCompacter) Print(io.Writer) error {
	// Structures are printed in print.go.
	return nil
}
|
||||
|
||||
// Handler panics: lookups in simple blocks are special-cased inline in the
// generated code instead of being dispatched through a handler function.
func (b *simpleCompacter) Handler() string {
	panic("Handler should be special-cased for this Compacter")
}
|
||||
251
vendor/golang.org/x/text/internal/triegen/print.go
generated
vendored
Normal file
251
vendor/golang.org/x/text/internal/triegen/print.go
generated
vendored
Normal file
@@ -0,0 +1,251 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package triegen
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// print writes all the data structures as well as the code necessary to use the
// trie to w.
func (b *builder) print(w io.Writer) error {
	// Fill in the statistics referenced by the templates.
	b.Stats.NValueEntries = len(b.ValueBlocks) * blockSize
	b.Stats.NValueBytes = len(b.ValueBlocks) * blockSize * b.ValueSize
	b.Stats.NIndexEntries = len(b.IndexBlocks) * blockSize
	b.Stats.NIndexBytes = len(b.IndexBlocks) * blockSize * b.IndexSize
	b.Stats.NHandleBytes = len(b.Trie) * 2 * b.IndexSize

	// If we only have one root trie, all starter blocks are at position 0 and
	// we can access the arrays directly.
	if len(b.Trie) == 1 {
		// At this point we cannot refer to the generated tables directly.
		b.ASCIIBlock = b.Name + "Values"
		b.StarterBlock = b.Name + "Index"
	} else {
		// Otherwise we need to have explicit starter indexes in the trie
		// structure.
		b.ASCIIBlock = "t.ascii"
		b.StarterBlock = "t.utf8Start"
	}

	// Emit the lookup functions for []byte input.
	b.SourceType = "[]byte"
	if err := lookupGen.Execute(w, b); err != nil {
		return err
	}

	// Emit the lookup functions again, for string input.
	b.SourceType = "string"
	if err := lookupGen.Execute(w, b); err != nil {
		return err
	}

	// Emit the trie type, tables, and handles.
	if err := trieGen.Execute(w, b); err != nil {
		return err
	}

	// Let each Compacter print its own auxiliary data structures.
	for _, c := range b.Compactions {
		if err := c.c.Print(w); err != nil {
			return err
		}
	}

	return nil
}
|
||||
|
||||
func printValues(n int, values []uint64) string {
|
||||
w := &bytes.Buffer{}
|
||||
boff := n * blockSize
|
||||
fmt.Fprintf(w, "\t// Block %#x, offset %#x", n, boff)
|
||||
var newline bool
|
||||
for i, v := range values {
|
||||
if i%6 == 0 {
|
||||
newline = true
|
||||
}
|
||||
if v != 0 {
|
||||
if newline {
|
||||
fmt.Fprintf(w, "\n")
|
||||
newline = false
|
||||
}
|
||||
fmt.Fprintf(w, "\t%#02x:%#04x, ", boff+i, v)
|
||||
}
|
||||
}
|
||||
return w.String()
|
||||
}
|
||||
|
||||
func printIndex(b *builder, nr int, n *node) string {
|
||||
w := &bytes.Buffer{}
|
||||
boff := nr * blockSize
|
||||
fmt.Fprintf(w, "\t// Block %#x, offset %#x", nr, boff)
|
||||
var newline bool
|
||||
for i, c := range n.children {
|
||||
if i%8 == 0 {
|
||||
newline = true
|
||||
}
|
||||
if c != nil {
|
||||
v := b.Compactions[c.index.compaction].Offset + uint32(c.index.index)
|
||||
if v != 0 {
|
||||
if newline {
|
||||
fmt.Fprintf(w, "\n")
|
||||
newline = false
|
||||
}
|
||||
fmt.Fprintf(w, "\t%#02x:%#02x, ", boff+i, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
return w.String()
|
||||
}
|
||||
|
||||
var (
|
||||
trieGen = template.Must(template.New("trie").Funcs(template.FuncMap{
|
||||
"printValues": printValues,
|
||||
"printIndex": printIndex,
|
||||
"title": strings.Title,
|
||||
"dec": func(x int) int { return x - 1 },
|
||||
"psize": func(n int) string {
|
||||
return fmt.Sprintf("%d bytes (%.2f KiB)", n, float64(n)/1024)
|
||||
},
|
||||
}).Parse(trieTemplate))
|
||||
lookupGen = template.Must(template.New("lookup").Parse(lookupTemplate))
|
||||
)
|
||||
|
||||
// TODO: consider the return type of lookup. It could be uint64, even if the
// internal value type is smaller. We will have to verify this with the
// performance of unicode/norm, which is very sensitive to such changes.

// trieTemplate emits the trie struct, its constructor, the lookupValue
// dispatch over the registered Compactions, and the values/index tables.
// The $multi branch adds per-root handle tables when more than one trie
// shares the structure.
const trieTemplate = `{{$b := .}}{{$multi := gt (len .Trie) 1}}
// {{.Name}}Trie. Total size: {{psize .Size}}. Checksum: {{printf "%08x" .Checksum}}.
type {{.Name}}Trie struct { {{if $multi}}
	ascii []{{.ValueType}} // index for ASCII bytes
	utf8Start  []{{.IndexType}} // index for UTF-8 bytes >= 0xC0
{{end}}}

func new{{title .Name}}Trie(i int) *{{.Name}}Trie { {{if $multi}}
	h := {{.Name}}TrieHandles[i]
	return &{{.Name}}Trie{ {{.Name}}Values[uint32(h.ascii)<<6:], {{.Name}}Index[uint32(h.multi)<<6:] }
}

type {{.Name}}TrieHandle struct {
	ascii, multi {{.IndexType}}
}

// {{.Name}}TrieHandles: {{len .Trie}} handles, {{.Stats.NHandleBytes}} bytes
var {{.Name}}TrieHandles = [{{len .Trie}}]{{.Name}}TrieHandle{
{{range .Trie}}	{ {{.ASCIIIndex}}, {{.StarterIndex}} }, // {{printf "%08x" .Checksum}}: {{.Name}}
{{end}}}{{else}}
	return &{{.Name}}Trie{}
}
{{end}}
// lookupValue determines the type of block n and looks up the value for b.
func (t *{{.Name}}Trie) lookupValue(n uint32, b byte) {{.ValueType}}{{$last := dec (len .Compactions)}} {
	switch { {{range $i, $c := .Compactions}}
	{{if eq $i $last}}default{{else}}case n < {{$c.Cutoff}}{{end}}:{{if ne $i 0}}
		n -= {{$c.Offset}}{{end}}
		return {{print $b.ValueType}}({{$c.Handler}}){{end}}
	}
}

// {{.Name}}Values: {{len .ValueBlocks}} blocks, {{.Stats.NValueEntries}} entries, {{.Stats.NValueBytes}} bytes
// The third block is the zero block.
var {{.Name}}Values = [{{.Stats.NValueEntries}}]{{.ValueType}} {
{{range $i, $v := .ValueBlocks}}{{printValues $i $v}}
{{end}}}

// {{.Name}}Index: {{len .IndexBlocks}} blocks, {{.Stats.NIndexEntries}} entries, {{.Stats.NIndexBytes}} bytes
// Block 0 is the zero block.
var {{.Name}}Index = [{{.Stats.NIndexEntries}}]{{.IndexType}} {
{{range $i, $v := .IndexBlocks}}{{printIndex $b $i $v}}
{{end}}}
`
|
||||
|
||||
// TODO: consider allowing zero-length strings after evaluating performance with
// unicode/norm.

// lookupTemplate emits the lookup/lookupString functions (bounds-checked,
// rejecting ill-formed UTF-8) and their Unsafe variants (which assume a full,
// valid encoding). It is executed once per SourceType ([]byte and string).
const lookupTemplate = `
// lookup{{if eq .SourceType "string"}}String{{end}} returns the trie value for the first UTF-8 encoding in s and
// the width in bytes of this encoding. The size will be 0 if s does not
// hold enough bytes to complete the encoding. len(s) must be greater than 0.
func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}(s {{.SourceType}}) (v {{.ValueType}}, sz int) {
	c0 := s[0]
	switch {
	case c0 < 0x80: // is ASCII
		return {{.ASCIIBlock}}[c0], 1
	case c0 < 0xC2:
		return 0, 1 // Illegal UTF-8: not a starter, not ASCII.
	case c0 < 0xE0: // 2-byte UTF-8
		if len(s) < 2 {
			return 0, 0
		}
		i := {{.StarterBlock}}[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return 0, 1 // Illegal UTF-8: not a continuation byte.
		}
		return t.lookupValue(uint32(i), c1), 2
	case c0 < 0xF0: // 3-byte UTF-8
		if len(s) < 3 {
			return 0, 0
		}
		i := {{.StarterBlock}}[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return 0, 1 // Illegal UTF-8: not a continuation byte.
		}
		o := uint32(i)<<6 + uint32(c1)
		i = {{.Name}}Index[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return 0, 2 // Illegal UTF-8: not a continuation byte.
		}
		return t.lookupValue(uint32(i), c2), 3
	case c0 < 0xF8: // 4-byte UTF-8
		if len(s) < 4 {
			return 0, 0
		}
		i := {{.StarterBlock}}[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return 0, 1 // Illegal UTF-8: not a continuation byte.
		}
		o := uint32(i)<<6 + uint32(c1)
		i = {{.Name}}Index[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return 0, 2 // Illegal UTF-8: not a continuation byte.
		}
		o = uint32(i)<<6 + uint32(c2)
		i = {{.Name}}Index[o]
		c3 := s[3]
		if c3 < 0x80 || 0xC0 <= c3 {
			return 0, 3 // Illegal UTF-8: not a continuation byte.
		}
		return t.lookupValue(uint32(i), c3), 4
	}
	// Illegal rune
	return 0, 1
}

// lookup{{if eq .SourceType "string"}}String{{end}}Unsafe returns the trie value for the first UTF-8 encoding in s.
// s must start with a full and valid UTF-8 encoded rune.
func (t *{{.Name}}Trie) lookup{{if eq .SourceType "string"}}String{{end}}Unsafe(s {{.SourceType}}) {{.ValueType}} {
	c0 := s[0]
	if c0 < 0x80 { // is ASCII
		return {{.ASCIIBlock}}[c0]
	}
	i := {{.StarterBlock}}[c0]
	if c0 < 0xE0 { // 2-byte UTF-8
		return t.lookupValue(uint32(i), s[1])
	}
	i = {{.Name}}Index[uint32(i)<<6+uint32(s[1])]
	if c0 < 0xF0 { // 3-byte UTF-8
		return t.lookupValue(uint32(i), s[2])
	}
	i = {{.Name}}Index[uint32(i)<<6+uint32(s[2])]
	if c0 < 0xF8 { // 4-byte UTF-8
		return t.lookupValue(uint32(i), s[3])
	}
	return 0
}
`
|
||||
494
vendor/golang.org/x/text/internal/triegen/triegen.go
generated
vendored
Normal file
494
vendor/golang.org/x/text/internal/triegen/triegen.go
generated
vendored
Normal file
@@ -0,0 +1,494 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package triegen implements a code generator for a trie for associating
|
||||
// unsigned integer values with UTF-8 encoded runes.
|
||||
//
|
||||
// Many of the go.text packages use tries for storing per-rune information. A
|
||||
// trie is especially useful if many of the runes have the same value. If this
|
||||
// is the case, many blocks can be expected to be shared allowing for
|
||||
// information on many runes to be stored in little space.
|
||||
//
|
||||
// As most of the lookups are done directly on []byte slices, the tries use the
|
||||
// UTF-8 bytes directly for the lookup. This saves a conversion from UTF-8 to
|
||||
// runes and contributes a little bit to better performance. It also naturally
|
||||
// provides a fast path for ASCII.
|
||||
//
|
||||
// Space is also an issue. There are many code points defined in Unicode and as
|
||||
// a result tables can get quite large. So every byte counts. The triegen
|
||||
// package automatically chooses the smallest integer values to represent the
|
||||
// tables. Compacters allow further compression of the trie by allowing for
|
||||
// alternative representations of individual trie blocks.
|
||||
//
|
||||
// triegen allows generating multiple tries as a single structure. This is
|
||||
// useful when, for example, one wants to generate tries for several languages
|
||||
// that have a lot of values in common. Some existing libraries for
|
||||
// internationalization store all per-language data as a dynamically loadable
|
||||
// chunk. The go.text packages are designed with the assumption that the user
|
||||
// typically wants to compile in support for all supported languages, in line
|
||||
// with the approach common to Go to create a single standalone binary. The
|
||||
// multi-root trie approach can give significant storage savings in this
|
||||
// scenario.
|
||||
//
|
||||
// triegen generates both tables and code. The code is optimized to use the
|
||||
// automatically chosen data types. The following code is generated for a Trie
|
||||
// or multiple Tries named "foo":
|
||||
// - type fooTrie
|
||||
// The trie type.
|
||||
//
|
||||
// - func newFooTrie(x int) *fooTrie
|
||||
// Trie constructor, where x is the index of the trie passed to Gen.
|
||||
//
|
||||
// - func (t *fooTrie) lookup(s []byte) (v uintX, sz int)
|
||||
// The lookup method, where uintX is automatically chosen.
|
||||
//
|
||||
// - func lookupString, lookupUnsafe and lookupStringUnsafe
|
||||
// Variants of the above.
|
||||
//
|
||||
// - var fooValues and fooIndex and any tables generated by Compacters.
|
||||
// The core trie data.
|
||||
//
|
||||
// - var fooTrieHandles
|
||||
// Indexes of starter blocks in case of multiple trie roots.
|
||||
//
|
||||
// It is recommended that users test the generated trie by checking the returned
|
||||
// value for every rune. Such exhaustive tests are possible as the the number of
|
||||
// runes in Unicode is limited.
|
||||
package triegen // import "golang.org/x/text/internal/triegen"
|
||||
|
||||
// TODO: Arguably, the internally optimized data types would not have to be
|
||||
// exposed in the generated API. We could also investigate not generating the
|
||||
// code, but using it through a package. We would have to investigate the impact
|
||||
// on performance of making such change, though. For packages like unicode/norm,
|
||||
// small changes like this could tank performance.
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"hash/crc64"
|
||||
"io"
|
||||
"log"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// builder builds a set of tries for associating values with runes. The set of
// tries can share common index and value blocks.
type builder struct {
	// Name is the base name used for all generated identifiers.
	Name string

	// ValueType is the type of the trie values looked up.
	ValueType string

	// ValueSize is the byte size of the ValueType.
	ValueSize int

	// IndexType is the type of trie index values used for all UTF-8 bytes of
	// a rune except the last one.
	IndexType string

	// IndexSize is the byte size of the IndexType.
	IndexSize int

	// SourceType is used when generating the lookup functions. If the user
	// requests StringSupport, all lookup functions will be generated for
	// string input as well.
	SourceType string

	// Trie holds the roots of all tries being built together.
	Trie []*Trie

	IndexBlocks []*node
	ValueBlocks [][]uint64
	Compactions []compaction
	Checksum    uint64

	// ASCIIBlock and StarterBlock are the expressions the generated lookup
	// code uses to reach the ASCII values and the starter index block; they
	// are set in print depending on the number of trie roots.
	ASCIIBlock   string
	StarterBlock string

	// The *Idx maps deduplicate blocks by hash so that identical blocks are
	// stored only once.
	indexBlockIdx map[uint64]int
	valueBlockIdx map[uint64]nodeIndex
	asciiBlockIdx map[uint64]int

	// Stats are used to fill out the template.
	Stats struct {
		NValueEntries int
		NValueBytes   int
		NIndexEntries int
		NIndexBytes   int
		NHandleBytes  int
	}

	// err records the first error encountered while building (see setError).
	err error
}
|
||||
|
||||
// A nodeIndex encodes the index of a node, which is defined by the compaction
// which stores it and an index within the compaction. For internal nodes, the
// compaction is always 0.
type nodeIndex struct {
	compaction int // index into builder.Compactions
	index      int // position within that compaction
}
|
||||
|
||||
// compaction keeps track of stats used for the compaction.
type compaction struct {
	c         Compacter
	blocks    []*node // blocks assigned to this compaction
	maxHandle uint32  // largest handle returned by c.Store
	totalSize int     // total byte size of the stored blocks

	// Used by template-based generator and thus exported.
	Cutoff  uint32 // handles below this value are dispatched to this compaction
	Offset  uint32 // subtracted from the handle before invoking Handler
	Handler string // Go expression performing the value lookup
}
|
||||
|
||||
func (b *builder) setError(err error) {
|
||||
if b.err == nil {
|
||||
b.err = err
|
||||
}
|
||||
}
|
||||
|
||||
// An Option can be passed to Gen to configure the builder before the tries
// are built.
type Option func(b *builder) error
|
||||
|
||||
// Compact configures the trie generator to use the given Compacter.
|
||||
func Compact(c Compacter) Option {
|
||||
return func(b *builder) error {
|
||||
b.Compactions = append(b.Compactions, compaction{
|
||||
c: c,
|
||||
Handler: c.Handler() + "(n, b)"})
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Gen writes Go code for a shared trie lookup structure to w for the given
// Tries. The generated trie type will be called nameTrie. newNameTrie(x) will
// return the *nameTrie for tries[x]. A value can be looked up by using one of
// the various lookup methods defined on nameTrie. It returns the table size of
// the generated trie.
func Gen(w io.Writer, name string, tries []*Trie, opts ...Option) (sz int, err error) {
	// The index contains two dummy blocks, followed by the zero block. The zero
	// block is at offset 0x80, so that the offset for the zero block for
	// continuation bytes is 0.
	b := &builder{
		Name:        name,
		Trie:        tries,
		IndexBlocks: []*node{{}, {}, {}},
		Compactions: []compaction{{
			Handler: name + "Values[n<<6+uint32(b)]",
		}},
		// The 0 key in indexBlockIdx and valueBlockIdx is the hash of the zero
		// block.
		indexBlockIdx: map[uint64]int{0: 0},
		valueBlockIdx: map[uint64]nodeIndex{0: {}},
		asciiBlockIdx: map[uint64]int{},
	}
	// The first compaction is the default, uncompacted representation; it is
	// backed by the builder itself.
	b.Compactions[0].c = (*simpleCompacter)(b)

	// Apply user-supplied options (e.g. additional Compacters).
	for _, f := range opts {
		if err := f(b); err != nil {
			return 0, err
		}
	}
	b.build()
	if b.err != nil {
		return 0, b.err
	}
	if err = b.print(w); err != nil {
		return 0, err
	}
	return b.Size(), nil
}
|
||||
|
||||
// A Trie represents a single root node of a trie. A builder may build several
// overlapping tries at once.
type Trie struct {
	root *node

	hiddenTrie
}
|
||||
|
||||
// hiddenTrie contains values we want to be visible to the template generator,
// but hidden from the API documentation.
type hiddenTrie struct {
	Name     string // name passed to NewTrie
	Checksum uint64
	// ASCIIIndex and StarterIndex fill the ascii/multi fields of the
	// generated handles table (see trieTemplate).
	ASCIIIndex   int
	StarterIndex int
}
|
||||
|
||||
// NewTrie returns a new trie root.
|
||||
func NewTrie(name string) *Trie {
|
||||
return &Trie{
|
||||
&node{
|
||||
children: make([]*node, blockSize),
|
||||
values: make([]uint64, utf8.RuneSelf),
|
||||
},
|
||||
hiddenTrie{Name: name},
|
||||
}
|
||||
}
|
||||
|
||||
// Gen is a convenience wrapper around the Gen func passing t as the only trie
|
||||
// and uses the name passed to NewTrie. It returns the size of the generated
|
||||
// tables.
|
||||
func (t *Trie) Gen(w io.Writer, opts ...Option) (sz int, err error) {
|
||||
return Gen(w, t.Name, []*Trie{t}, opts...)
|
||||
}
|
||||
|
||||
// node is a node of the intermediate trie structure.
type node struct {
	// children holds this node's children. It is always of length 64.
	// A child node may be nil.
	children []*node

	// values contains the values of this node. If it is non-nil, this node is
	// either a root or leaf node:
	// For root nodes, len(values) == 128 and it maps the bytes in [0x00, 0x7F].
	// For leaf nodes, len(values) == 64 and it maps the bytes in [0x80, 0xBF].
	values []uint64

	// index records the compaction and position where this node's block is
	// stored (see printIndex).
	index nodeIndex
}
|
||||
|
||||
// Insert associates value with the given rune. Insert will panic if a non-zero
|
||||
// value is passed for an invalid rune.
|
||||
func (t *Trie) Insert(r rune, value uint64) {
|
||||
if value == 0 {
|
||||
return
|
||||
}
|
||||
s := string(r)
|
||||
if []rune(s)[0] != r && value != 0 {
|
||||
// Note: The UCD tables will always assign what amounts to a zero value
|
||||
// to a surrogate. Allowing a zero value for an illegal rune allows
|
||||
// users to iterate over [0..MaxRune] without having to explicitly
|
||||
// exclude surrogates, which would be tedious.
|
||||
panic(fmt.Sprintf("triegen: non-zero value for invalid rune %U", r))
|
||||
}
|
||||
if len(s) == 1 {
|
||||
// It is a root node value (ASCII).
|
||||
t.root.values[s[0]] = value
|
||||
return
|
||||
}
|
||||
|
||||
n := t.root
|
||||
for ; len(s) > 1; s = s[1:] {
|
||||
if n.children == nil {
|
||||
n.children = make([]*node, blockSize)
|
||||
}
|
||||
p := s[0] % blockSize
|
||||
c := n.children[p]
|
||||
if c == nil {
|
||||
c = &node{}
|
||||
n.children[p] = c
|
||||
}
|
||||
if len(s) > 2 && c.values != nil {
|
||||
log.Fatalf("triegen: insert(%U): found internal node with values", r)
|
||||
}
|
||||
n = c
|
||||
}
|
||||
if n.values == nil {
|
||||
n.values = make([]uint64, blockSize)
|
||||
}
|
||||
if n.children != nil {
|
||||
log.Fatalf("triegen: insert(%U): found leaf node that also has child nodes", r)
|
||||
}
|
||||
n.values[s[0]-0x80] = value
|
||||
}
|
||||
|
||||
// Size returns the number of bytes the generated trie will take to store. It
|
||||
// needs to be exported as it is used in the templates.
|
||||
func (b *builder) Size() int {
|
||||
// Index blocks.
|
||||
sz := len(b.IndexBlocks) * blockSize * b.IndexSize
|
||||
|
||||
// Skip the first compaction, which represents the normal value blocks, as
|
||||
// its totalSize does not account for the ASCII blocks, which are managed
|
||||
// separately.
|
||||
sz += len(b.ValueBlocks) * blockSize * b.ValueSize
|
||||
for _, c := range b.Compactions[1:] {
|
||||
sz += c.totalSize
|
||||
}
|
||||
|
||||
// TODO: this computation does not account for the fixed overhead of a using
|
||||
// a compaction, either code or data. As for data, though, the typical
|
||||
// overhead of data is in the order of bytes (2 bytes for cases). Further,
|
||||
// the savings of using a compaction should anyway be substantial for it to
|
||||
// be worth it.
|
||||
|
||||
// For multi-root tries, we also need to account for the handles.
|
||||
if len(b.Trie) > 1 {
|
||||
sz += 2 * b.IndexSize * len(b.Trie)
|
||||
}
|
||||
return sz
|
||||
}
|
||||
|
||||
// build computes the value and index types, lays out all blocks for every
// trie, and assigns offsets to the compacters. It must be called exactly once
// after all tries have been populated.
func (b *builder) build() {
	// Compute the sizes of the values.
	var vmax uint64
	for _, t := range b.Trie {
		vmax = maxValue(t.root, vmax)
	}
	b.ValueType, b.ValueSize = getIntType(vmax)

	// Compute all block allocations.
	// TODO: first compute the ASCII blocks for all tries and then the other
	// nodes. ASCII blocks are more restricted in placement, as they require two
	// blocks to be placed consecutively. Processing them first may improve
	// sharing (at least one zero block can be expected to be saved.)
	for _, t := range b.Trie {
		b.Checksum += b.buildTrie(t)
	}

	// Compute the offsets for all the Compacters. Each compacter owns the
	// half-open handle range [Offset, Cutoff).
	offset := uint32(0)
	for i := range b.Compactions {
		c := &b.Compactions[i]
		c.Offset = offset
		offset += c.maxHandle + 1
		c.Cutoff = offset
	}

	// Compute the sizes of indexes.
	// TODO: different byte positions could have different sizes. So far we have
	// not found a case where this is beneficial.
	imax := uint64(b.Compactions[len(b.Compactions)-1].Cutoff)
	for _, ib := range b.IndexBlocks {
		if x := uint64(ib.index.index); x > imax {
			imax = x
		}
	}
	b.IndexType, b.IndexSize = getIntType(imax)
}
|
||||
|
||||
func maxValue(n *node, max uint64) uint64 {
|
||||
if n == nil {
|
||||
return max
|
||||
}
|
||||
for _, c := range n.children {
|
||||
max = maxValue(c, max)
|
||||
}
|
||||
for _, v := range n.values {
|
||||
if max < v {
|
||||
max = v
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
// getIntType returns the name and byte size of the smallest unsigned Go
// integer type that can hold v.
func getIntType(v uint64) (string, int) {
	if v < 1<<8 {
		return "uint8", 1
	}
	if v < 1<<16 {
		return "uint16", 2
	}
	if v < 1<<32 {
		return "uint32", 4
	}
	return "uint64", 8
}
|
||||
|
||||
// Block layout constants for the generated trie.
const (
	// blockSize is the number of entries in one index or value block.
	blockSize = 64

	// Subtract two blocks to offset 0x80, the first continuation byte.
	blockOffset = 2

	// Subtract three blocks to offset 0xC0, the first non-ASCII starter.
	rootBlockOffset = 3
)

// crcTable is used to fingerprint blocks so identical blocks can be shared.
var crcTable = crc64.MakeTable(crc64.ISO)
|
||||
|
||||
// buildTrie lays out the blocks for a single trie t, deduplicating its ASCII
// value blocks against previously built tries, and returns the trie's
// checksum.
func (b *builder) buildTrie(t *Trie) uint64 {
	n := t.root

	// Get the ASCII offset. For the first trie, the ASCII block will be at
	// position 0. Identical ASCII blocks are shared via a CRC-64 fingerprint.
	hasher := crc64.New(crcTable)
	binary.Write(hasher, binary.BigEndian, n.values)
	hash := hasher.Sum64()

	v, ok := b.asciiBlockIdx[hash]
	if !ok {
		v = len(b.ValueBlocks)
		b.asciiBlockIdx[hash] = v

		// The 128 ASCII values occupy two consecutive 64-entry blocks.
		b.ValueBlocks = append(b.ValueBlocks, n.values[:blockSize], n.values[blockSize:])
		if v == 0 {
			// Add the zero block at position 2 so that it will be assigned a
			// zero reference in the lookup blocks.
			// TODO: always do this? This would allow us to remove a check from
			// the trie lookup, but at the expense of extra space. Analyze
			// performance for unicode/norm.
			b.ValueBlocks = append(b.ValueBlocks, make([]uint64, blockSize))
		}
	}
	t.ASCIIIndex = v

	// Compute remaining offsets.
	t.Checksum = b.computeOffsets(n, true)
	// We already subtracted the normal blockOffset from the index. Subtract the
	// difference for starter bytes.
	t.StarterIndex = n.index.index - (rootBlockOffset - blockOffset)
	return t.Checksum
}
|
||||
|
||||
// computeOffsets recursively assigns block positions to n and its subtree,
// sharing identical blocks by CRC-64 fingerprint, and returns the subtree's
// hash. root indicates that n is the root node of a trie.
func (b *builder) computeOffsets(n *node, root bool) uint64 {
	// For the first trie, the root lookup block will be at position 3, which is
	// the offset for UTF-8 non-ASCII starter bytes.
	first := len(b.IndexBlocks) == rootBlockOffset
	if first {
		b.IndexBlocks = append(b.IndexBlocks, n)
	}

	// We special-case the cases where all values recursively are 0. This allows
	// for the use of a zero block to which all such values can be directed.
	hash := uint64(0)
	if n.children != nil || n.values != nil {
		hasher := crc64.New(crcTable)
		// The hash covers the child hashes (post-order recursion) and this
		// node's own values, so equal subtrees collide deliberately.
		for _, c := range n.children {
			var v uint64
			if c != nil {
				v = b.computeOffsets(c, false)
			}
			binary.Write(hasher, binary.BigEndian, v)
		}
		binary.Write(hasher, binary.BigEndian, n.values)
		hash = hasher.Sum64()
	}

	if first {
		b.indexBlockIdx[hash] = rootBlockOffset - blockOffset
	}

	// Compacters don't apply to internal nodes.
	if n.children != nil {
		v, ok := b.indexBlockIdx[hash]
		if !ok {
			v = len(b.IndexBlocks) - blockOffset
			b.IndexBlocks = append(b.IndexBlocks, n)
			b.indexBlockIdx[hash] = v
		}
		n.index = nodeIndex{0, v}
	} else {
		h, ok := b.valueBlockIdx[hash]
		if !ok {
			// Pick the compacter that stores this value block in the fewest
			// bytes; index 0 (the uncompacted representation) is the baseline.
			bestI, bestSize := 0, blockSize*b.ValueSize
			for i, c := range b.Compactions[1:] {
				if sz, ok := c.c.Size(n.values); ok && bestSize > sz {
					bestI, bestSize = i+1, sz
				}
			}
			c := &b.Compactions[bestI]
			c.totalSize += bestSize
			v := c.c.Store(n.values)
			if c.maxHandle < v {
				c.maxHandle = v
			}
			h = nodeIndex{bestI, int(v)}
			b.valueBlockIdx[hash] = h
		}
		n.index = h
	}
	return hash
}
|
||||
376
vendor/golang.org/x/text/internal/ucd/ucd.go
generated
vendored
Normal file
376
vendor/golang.org/x/text/internal/ucd/ucd.go
generated
vendored
Normal file
@@ -0,0 +1,376 @@
|
||||
// Copyright 2014 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package ucd provides a parser for Unicode Character Database files, the
|
||||
// format of which is defined in http://www.unicode.org/reports/tr44/. See
|
||||
// http://www.unicode.org/Public/UCD/latest/ucd/ for example files.
|
||||
//
|
||||
// It currently does not support substitutions of missing fields.
|
||||
package ucd // import "golang.org/x/text/internal/ucd"
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"log"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// UnicodeData.txt fields.
|
||||
// UnicodeData.txt fields, in the order they appear on each semicolon-separated
// line. Use these as the field index argument to the Parser accessors.
const (
	CodePoint = iota
	Name
	GeneralCategory
	CanonicalCombiningClass
	BidiClass
	DecompMapping
	DecimalValue
	DigitValue
	NumericValue
	BidiMirrored
	Unicode1Name
	ISOComment
	SimpleUppercaseMapping
	SimpleLowercaseMapping
	SimpleTitlecaseMapping
)
|
||||
|
||||
// Parse calls f for each entry in the given reader of a UCD file. It will close
|
||||
// the reader upon return. It will call log.Fatal if any error occurred.
|
||||
//
|
||||
// This implements the most common usage pattern of using Parser.
|
||||
func Parse(r io.ReadCloser, f func(p *Parser)) {
|
||||
defer r.Close()
|
||||
|
||||
p := New(r)
|
||||
for p.Next() {
|
||||
f(p)
|
||||
}
|
||||
if err := p.Err(); err != nil {
|
||||
r.Close() // os.Exit will cause defers not to be called.
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
// An Option is used to configure a Parser.
|
||||
// An Option is used to configure a Parser.
type Option func(p *Parser)

// keepRanges is the implementation behind the KeepRanges option.
func keepRanges(p *Parser) {
	p.keepRanges = true
}

var (
	// KeepRanges prevents the expansion of ranges. The raw ranges can be
	// obtained by calling Range(0) on the parser.
	KeepRanges Option = keepRanges
)

// The Part option register a handler for lines starting with a '@'. The text
// after a '@' is available as the first field. Comments are handled as usual.
func Part(f func(p *Parser)) Option {
	return func(p *Parser) {
		p.partHandler = f
	}
}

// The CommentHandler option passes comments that are on a line by itself to
// a given handler.
func CommentHandler(f func(s string)) Option {
	return func(p *Parser) {
		p.commentHandler = f
	}
}
|
||||
|
||||
// A Parser parses Unicode Character Database (UCD) files.
|
||||
// A Parser parses Unicode Character Database (UCD) files.
type Parser struct {
	scanner *bufio.Scanner

	keepRanges bool // Don't expand rune ranges in field 0.

	// err holds the first error encountered; later errors are discarded.
	err     error
	comment []byte
	field   [][]byte
	// parsedRange is needed in case Range(0) is called more than once for one
	// field. In some cases this requires scanning ahead.
	parsedRange          bool
	rangeStart, rangeEnd rune

	// Optional callbacks installed via the Part and CommentHandler options.
	partHandler    func(p *Parser)
	commentHandler func(s string)
}
|
||||
|
||||
func (p *Parser) setError(err error) {
|
||||
if p.err == nil {
|
||||
p.err = err
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Parser) getField(i int) []byte {
|
||||
if i >= len(p.field) {
|
||||
return nil
|
||||
}
|
||||
return p.field[i]
|
||||
}
|
||||
|
||||
// Err returns a non-nil error if any error occurred during parsing.
|
||||
func (p *Parser) Err() error {
|
||||
return p.err
|
||||
}
|
||||
|
||||
// New returns a Parser for the given Reader.
|
||||
func New(r io.Reader, o ...Option) *Parser {
|
||||
p := &Parser{
|
||||
scanner: bufio.NewScanner(r),
|
||||
}
|
||||
for _, f := range o {
|
||||
f(p)
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// Next parses the next line in the file. It returns true if a line was parsed
|
||||
// and false if it reached the end of the file.
|
||||
// Next parses the next line in the file. It returns true if a line was parsed
// and false if it reached the end of the file.
func (p *Parser) Next() bool {
	// When expanding ranges, successive calls yield successive runes of the
	// current range before reading a new line.
	if !p.keepRanges && p.rangeStart < p.rangeEnd {
		p.rangeStart++
		return true
	}
	p.comment = nil
	p.field = p.field[:0]
	p.parsedRange = false

	for p.scanner.Scan() {
		b := p.scanner.Bytes()
		if len(b) == 0 {
			continue
		}
		// A line that is entirely a comment is passed to the comment handler
		// (if any) and otherwise skipped.
		if b[0] == '#' {
			if p.commentHandler != nil {
				p.commentHandler(strings.TrimSpace(string(b[1:])))
			}
			continue
		}

		// Parse line
		if i := bytes.IndexByte(b, '#'); i != -1 {
			p.comment = bytes.TrimSpace(b[i+1:])
			b = b[:i]
		}
		// '@' lines delimit parts; they are dispatched to the part handler
		// with the part name as field 0 and do not produce an entry.
		if b[0] == '@' {
			if p.partHandler != nil {
				p.field = append(p.field, bytes.TrimSpace(b[1:]))
				p.partHandler(p)
				p.field = p.field[:0]
			}
			p.comment = nil
			continue
		}
		// Split the remainder on ';' into trimmed fields.
		for {
			i := bytes.IndexByte(b, ';')
			if i == -1 {
				p.field = append(p.field, bytes.TrimSpace(b))
				break
			}
			p.field = append(p.field, bytes.TrimSpace(b[:i]))
			b = b[i+1:]
		}
		if !p.keepRanges {
			p.rangeStart, p.rangeEnd = p.getRange(0)
		}
		return true
	}
	p.setError(p.scanner.Err())
	return false
}
|
||||
|
||||
// parseRune parses b as a hexadecimal code point, tolerating an optional
// "U+" prefix.
func parseRune(b []byte) (rune, error) {
	if len(b) > 2 && b[0] == 'U' && b[1] == '+' {
		b = b[2:]
	}
	v, err := strconv.ParseUint(string(b), 16, 32)
	return rune(v), err
}
|
||||
|
||||
func (p *Parser) parseRune(b []byte) rune {
|
||||
x, err := parseRune(b)
|
||||
p.setError(err)
|
||||
return x
|
||||
}
|
||||
|
||||
// Rune parses and returns field i as a rune.
|
||||
func (p *Parser) Rune(i int) rune {
|
||||
if i > 0 || p.keepRanges {
|
||||
return p.parseRune(p.getField(i))
|
||||
}
|
||||
return p.rangeStart
|
||||
}
|
||||
|
||||
// Runes interprets and returns field i as a sequence of runes.
|
||||
func (p *Parser) Runes(i int) (runes []rune) {
|
||||
add := func(b []byte) {
|
||||
if b = bytes.TrimSpace(b); len(b) > 0 {
|
||||
runes = append(runes, p.parseRune(b))
|
||||
}
|
||||
}
|
||||
for b := p.getField(i); ; {
|
||||
i := bytes.IndexByte(b, ' ')
|
||||
if i == -1 {
|
||||
add(b)
|
||||
break
|
||||
}
|
||||
add(b[:i])
|
||||
b = b[i+1:]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var (
	// errIncorrectLegacyRange is reported when a "<..., First>" UnicodeData
	// entry is not followed by a matching "<..., Last>" entry.
	errIncorrectLegacyRange = errors.New("ucd: unmatched <* First>")

	// reRange matches one line of a legacy rune range:
	// code point; "<name, First|Last>"; remaining fields.
	reRange = regexp.MustCompile("^([0-9A-F]*);<([^,]*), ([^>]*)>(.*)$")
)
|
||||
|
||||
// Range parses and returns field i as a rune range. A range is inclusive at
|
||||
// both ends. If the field only has one rune, first and last will be identical.
|
||||
// It supports the legacy format for ranges used in UnicodeData.txt.
|
||||
func (p *Parser) Range(i int) (first, last rune) {
|
||||
if !p.keepRanges {
|
||||
return p.rangeStart, p.rangeStart
|
||||
}
|
||||
return p.getRange(i)
|
||||
}
|
||||
|
||||
// getRange parses field i as a rune range, handling both the "A..B" notation
// and the legacy two-line "<name, First>/<name, Last>" notation used by
// UnicodeData.txt (which requires scanning ahead one line).
func (p *Parser) getRange(i int) (first, last rune) {
	b := p.getField(i)
	if k := bytes.Index(b, []byte("..")); k != -1 {
		return p.parseRune(b[:k]), p.parseRune(b[k+2:])
	}
	// The first field may not be a rune, in which case we may ignore any error
	// and set the range as 0..0.
	x, err := parseRune(b)
	if err != nil {
		// Disable range parsing henceforth. This ensures that an error will be
		// returned if the user subsequently will try to parse this field as
		// a Rune.
		p.keepRanges = true
	}
	// Special case for UnicodeData that was retained for backwards compatibility.
	if i == 0 && len(p.field) > 1 && bytes.HasSuffix(p.field[1], []byte("First>")) {
		if p.parsedRange {
			return p.rangeStart, p.rangeEnd
		}
		mf := reRange.FindStringSubmatch(p.scanner.Text())
		if mf == nil || !p.scanner.Scan() {
			p.setError(errIncorrectLegacyRange)
			return x, x
		}
		// Using Bytes would be more efficient here, but Text is a lot easier
		// and this is not a frequent case.
		ml := reRange.FindStringSubmatch(p.scanner.Text())
		if ml == nil || mf[2] != ml[2] || ml[3] != "Last" || mf[4] != ml[4] {
			p.setError(errIncorrectLegacyRange)
			return x, x
		}
		p.rangeStart, p.rangeEnd = x, p.parseRune(p.scanner.Bytes()[:len(ml[1])])
		p.parsedRange = true
		return p.rangeStart, p.rangeEnd
	}
	return x, x
}
|
||||
|
||||
// bools recognizes all valid UCD boolean values.
|
||||
// bools recognizes all valid UCD boolean values. An empty field counts as
// false.
var bools = map[string]bool{
	"":      false,
	"N":     false,
	"No":    false,
	"F":     false,
	"False": false,
	"Y":     true,
	"Yes":   true,
	"T":     true,
	"True":  true,
}
|
||||
|
||||
// Bool parses and returns field i as a boolean value.
|
||||
func (p *Parser) Bool(i int) bool {
|
||||
b := p.getField(i)
|
||||
for s, v := range bools {
|
||||
if bstrEq(b, s) {
|
||||
return v
|
||||
}
|
||||
}
|
||||
p.setError(strconv.ErrSyntax)
|
||||
return false
|
||||
}
|
||||
|
||||
// Int parses and returns field i as an integer value.
|
||||
func (p *Parser) Int(i int) int {
|
||||
x, err := strconv.ParseInt(string(p.getField(i)), 10, 64)
|
||||
p.setError(err)
|
||||
return int(x)
|
||||
}
|
||||
|
||||
// Uint parses and returns field i as an unsigned integer value.
|
||||
func (p *Parser) Uint(i int) uint {
|
||||
x, err := strconv.ParseUint(string(p.getField(i)), 10, 64)
|
||||
p.setError(err)
|
||||
return uint(x)
|
||||
}
|
||||
|
||||
// Float parses and returns field i as a decimal value.
|
||||
func (p *Parser) Float(i int) float64 {
|
||||
x, err := strconv.ParseFloat(string(p.getField(i)), 64)
|
||||
p.setError(err)
|
||||
return x
|
||||
}
|
||||
|
||||
// String parses and returns field i as a string value.
|
||||
func (p *Parser) String(i int) string {
|
||||
return string(p.getField(i))
|
||||
}
|
||||
|
||||
// Strings parses and returns field i as a space-separated list of strings.
|
||||
func (p *Parser) Strings(i int) []string {
|
||||
ss := strings.Split(string(p.getField(i)), " ")
|
||||
for i, s := range ss {
|
||||
ss[i] = strings.TrimSpace(s)
|
||||
}
|
||||
return ss
|
||||
}
|
||||
|
||||
// Comment returns the comments for the current line.
|
||||
func (p *Parser) Comment() string {
|
||||
return string(p.comment)
|
||||
}
|
||||
|
||||
var errUndefinedEnum = errors.New("ucd: undefined enum value")
|
||||
|
||||
// Enum interprets and returns field i as a value that must be one of the values
|
||||
// in enum.
|
||||
func (p *Parser) Enum(i int, enum ...string) string {
|
||||
b := p.getField(i)
|
||||
for _, s := range enum {
|
||||
if bstrEq(b, s) {
|
||||
return s
|
||||
}
|
||||
}
|
||||
p.setError(errUndefinedEnum)
|
||||
return ""
|
||||
}
|
||||
|
||||
// bstrEq reports whether the byte slice b and the string s hold the same
// bytes. The conversion in the comparison is recognized by the compiler and
// does not allocate.
func bstrEq(b []byte, s string) bool {
	return string(b) == s
}
|
||||
342
vendor/golang.org/x/text/secure/bidirule/bidirule.go
generated
vendored
Normal file
342
vendor/golang.org/x/text/secure/bidirule/bidirule.go
generated
vendored
Normal file
@@ -0,0 +1,342 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package bidirule implements the Bidi Rule defined by RFC 5893.
|
||||
//
|
||||
// This package is under development. The API may change without notice and
|
||||
// without preserving backward compatibility.
|
||||
package bidirule
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
|
||||
"golang.org/x/text/transform"
|
||||
"golang.org/x/text/unicode/bidi"
|
||||
)
|
||||
|
||||
// This file contains an implementation of RFC 5893: Right-to-Left Scripts for
|
||||
// Internationalized Domain Names for Applications (IDNA)
|
||||
//
|
||||
// A label is an individual component of a domain name. Labels are usually
|
||||
// shown separated by dots; for example, the domain name "www.example.com" is
|
||||
// composed of three labels: "www", "example", and "com".
|
||||
//
|
||||
// An RTL label is a label that contains at least one character of class R, AL,
|
||||
// or AN. An LTR label is any label that is not an RTL label.
|
||||
//
|
||||
// A "Bidi domain name" is a domain name that contains at least one RTL label.
|
||||
//
|
||||
// The following guarantees can be made based on the above:
|
||||
//
|
||||
// o In a domain name consisting of only labels that satisfy the rule,
|
||||
// the requirements of Section 3 are satisfied. Note that even LTR
|
||||
// labels and pure ASCII labels have to be tested.
|
||||
//
|
||||
// o In a domain name consisting of only LDH labels (as defined in the
|
||||
// Definitions document [RFC5890]) and labels that satisfy the rule,
|
||||
// the requirements of Section 3 are satisfied as long as a label
|
||||
// that starts with an ASCII digit does not come after a
|
||||
// right-to-left label.
|
||||
//
|
||||
// No guarantee is given for other combinations.
|
||||
|
||||
// ErrInvalid indicates a label is invalid according to the Bidi Rule.
|
||||
// ErrInvalid indicates a label is invalid according to the Bidi Rule.
var ErrInvalid = errors.New("bidirule: failed Bidi Rule")

// ruleState is a state of the Bidi Rule finite-state machine.
type ruleState uint8

const (
	ruleInitial ruleState = iota
	ruleLTR
	ruleLTRFinal
	ruleRTL
	ruleRTLFinal
	ruleInvalid
)

// ruleTransition describes one outgoing edge of a state: the next state and a
// bitmask of bidi classes that take this edge.
type ruleTransition struct {
	next ruleState
	mask uint16
}
|
||||
|
||||
// transitions encodes the Bidi Rule state machine: for each state, two
// candidate transitions tried in order. Bracketed references are to the rule
// numbers in RFC 5893, Section 2.
var transitions = [...][2]ruleTransition{
	// [2.1] The first character must be a character with Bidi property L, R, or
	// AL. If it has the R or AL property, it is an RTL label; if it has the L
	// property, it is an LTR label.
	ruleInitial: {
		{ruleLTRFinal, 1 << bidi.L},
		{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL},
	},
	ruleRTL: {
		// [2.3] In an RTL label, the end of the label must be a character with
		// Bidi property R, AL, EN, or AN, followed by zero or more characters
		// with Bidi property NSM.
		{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN},

		// [2.2] In an RTL label, only characters with the Bidi properties R,
		// AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed.
		// We exclude the entries from [2.3]
		{ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM},
	},
	ruleRTLFinal: {
		// [2.3] In an RTL label, the end of the label must be a character with
		// Bidi property R, AL, EN, or AN, followed by zero or more characters
		// with Bidi property NSM.
		{ruleRTLFinal, 1<<bidi.R | 1<<bidi.AL | 1<<bidi.EN | 1<<bidi.AN | 1<<bidi.NSM},

		// [2.2] In an RTL label, only characters with the Bidi properties R,
		// AL, AN, EN, ES, CS, ET, ON, BN, or NSM are allowed.
		// We exclude the entries from [2.3] and NSM.
		{ruleRTL, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN},
	},
	ruleLTR: {
		// [2.6] In an LTR label, the end of the label must be a character with
		// Bidi property L or EN, followed by zero or more characters with Bidi
		// property NSM.
		{ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN},

		// [2.5] In an LTR label, only characters with the Bidi properties L,
		// EN, ES, CS, ET, ON, BN, or NSM are allowed.
		// We exclude the entries from [2.6].
		{ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN | 1<<bidi.NSM},
	},
	ruleLTRFinal: {
		// [2.6] In an LTR label, the end of the label must be a character with
		// Bidi property L or EN, followed by zero or more characters with Bidi
		// property NSM.
		{ruleLTRFinal, 1<<bidi.L | 1<<bidi.EN | 1<<bidi.NSM},

		// [2.5] In an LTR label, only characters with the Bidi properties L,
		// EN, ES, CS, ET, ON, BN, or NSM are allowed.
		// We exclude the entries from [2.6].
		{ruleLTR, 1<<bidi.ES | 1<<bidi.CS | 1<<bidi.ET | 1<<bidi.ON | 1<<bidi.BN},
	},
	// ruleInvalid is absorbing: no class leads out of it.
	ruleInvalid: {
		{ruleInvalid, 0},
		{ruleInvalid, 0},
	},
}
|
||||
|
||||
// [2.4] In an RTL label, if an EN is present, no AN may be present, and
// vice versa. Seeing both bits of this mask therefore invalidates the label.
const exclusiveRTL = uint16(1<<bidi.EN | 1<<bidi.AN)
|
||||
|
||||
// From RFC 5893
|
||||
// An RTL label is a label that contains at least one character of type
|
||||
// R, AL, or AN.
|
||||
//
|
||||
// An LTR label is any label that is not an RTL label.
|
||||
|
||||
// Direction reports the direction of the given label as defined by RFC 5893.
|
||||
// The Bidi Rule does not have to be applied to labels of the category
|
||||
// LeftToRight.
|
||||
func Direction(b []byte) bidi.Direction {
|
||||
for i := 0; i < len(b); {
|
||||
e, sz := bidi.Lookup(b[i:])
|
||||
if sz == 0 {
|
||||
i++
|
||||
}
|
||||
c := e.Class()
|
||||
if c == bidi.R || c == bidi.AL || c == bidi.AN {
|
||||
return bidi.RightToLeft
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return bidi.LeftToRight
|
||||
}
|
||||
|
||||
// DirectionString reports the direction of the given label as defined by RFC
|
||||
// 5893. The Bidi Rule does not have to be applied to labels of the category
|
||||
// LeftToRight.
|
||||
func DirectionString(s string) bidi.Direction {
|
||||
for i := 0; i < len(s); {
|
||||
e, sz := bidi.LookupString(s[i:])
|
||||
if sz == 0 {
|
||||
i++
|
||||
}
|
||||
c := e.Class()
|
||||
if c == bidi.R || c == bidi.AL || c == bidi.AN {
|
||||
return bidi.RightToLeft
|
||||
}
|
||||
i += sz
|
||||
}
|
||||
return bidi.LeftToRight
|
||||
}
|
||||
|
||||
// Valid reports whether b conforms to the BiDi rule.
|
||||
func Valid(b []byte) bool {
|
||||
var t Transformer
|
||||
if n, ok := t.advance(b); !ok || n < len(b) {
|
||||
return false
|
||||
}
|
||||
return t.isFinal()
|
||||
}
|
||||
|
||||
// ValidString reports whether s conforms to the BiDi rule.
|
||||
func ValidString(s string) bool {
|
||||
var t Transformer
|
||||
if n, ok := t.advanceString(s); !ok || n < len(s) {
|
||||
return false
|
||||
}
|
||||
return t.isFinal()
|
||||
}
|
||||
|
||||
// New returns a Transformer that verifies that input adheres to the Bidi Rule.
|
||||
func New() *Transformer {
|
||||
return &Transformer{}
|
||||
}
|
||||
|
||||
// Transformer implements transform.Transform.
|
||||
type Transformer struct {
|
||||
state ruleState
|
||||
hasRTL bool
|
||||
seen uint16
|
||||
}
|
||||
|
||||
// A rule can only be violated for "Bidi Domain names", meaning if one of the
|
||||
// following categories has been observed.
|
||||
func (t *Transformer) isRTL() bool {
|
||||
const isRTL = 1<<bidi.R | 1<<bidi.AL | 1<<bidi.AN
|
||||
return t.seen&isRTL != 0
|
||||
}
|
||||
|
||||
func (t *Transformer) isFinal() bool {
|
||||
if !t.isRTL() {
|
||||
return true
|
||||
}
|
||||
return t.state == ruleLTRFinal || t.state == ruleRTLFinal || t.state == ruleInitial
|
||||
}
|
||||
|
||||
// Reset implements transform.Transformer.
|
||||
func (t *Transformer) Reset() { *t = Transformer{} }
|
||||
|
||||
// Transform implements transform.Transformer. This Transformer has state and
|
||||
// needs to be reset between uses.
|
||||
// Transform implements transform.Transformer. This Transformer has state and
// needs to be reset between uses.
func (t *Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	if len(dst) < len(src) {
		// Only validate as much as fits in dst; report ErrShortDst and clear
		// atEOF so the caller retries with the remainder.
		src = src[:len(dst)]
		atEOF = false
		err = transform.ErrShortDst
	}
	n, err1 := t.Span(src, atEOF)
	copy(dst, src[:n])
	// A validation error (other than needing more input) takes precedence
	// over ErrShortDst.
	if err == nil || err1 != nil && err1 != transform.ErrShortSrc {
		err = err1
	}
	return n, n, err
}
|
||||
|
||||
// Span returns the first n bytes of src that conform to the Bidi rule.
|
||||
// Span returns the first n bytes of src that conform to the Bidi rule.
func (t *Transformer) Span(src []byte, atEOF bool) (n int, err error) {
	// A previous call may already have invalidated an RTL label.
	if t.state == ruleInvalid && t.isRTL() {
		return 0, ErrInvalid
	}
	n, ok := t.advance(src)
	switch {
	case !ok:
		err = ErrInvalid
	case n < len(src):
		if !atEOF {
			// The tail may be an incomplete rune; ask for more input.
			err = transform.ErrShortSrc
			break
		}
		err = ErrInvalid
	case !t.isFinal():
		err = ErrInvalid
	}
	return n, err
}
|
||||
|
||||
// Precomputing the ASCII values decreases running time for the ASCII fast path
|
||||
// by about 30%.
|
||||
var asciiTable [128]bidi.Properties
|
||||
|
||||
func init() {
|
||||
for i := range asciiTable {
|
||||
p, _ := bidi.LookupRune(rune(i))
|
||||
asciiTable[i] = p
|
||||
}
|
||||
}
|
||||
|
||||
// advance feeds s through the Bidi Rule state machine, returning the number of
// bytes consumed and whether the input so far can still be valid.
func (t *Transformer) advance(s []byte) (n int, ok bool) {
	var e bidi.Properties
	var sz int
	for n < len(s) {
		if s[n] < utf8.RuneSelf {
			// ASCII fast path via the precomputed table.
			e, sz = asciiTable[s[n]], 1
		} else {
			e, sz = bidi.Lookup(s[n:])
			if sz <= 1 {
				if sz == 1 {
					// We always consider invalid UTF-8 to be invalid, even if
					// the string has not yet been determined to be RTL.
					// TODO: is this correct?
					return n, false
				}
				return n, true // incomplete UTF-8 encoding
			}
		}
		// TODO: using CompactClass would result in noticeable speedup.
		// See unicode/bidi/prop.go:Properties.CompactClass.
		c := uint16(1 << e.Class())
		t.seen |= c
		// [2.4] EN and AN are mutually exclusive in an RTL label.
		if t.seen&exclusiveRTL == exclusiveRTL {
			t.state = ruleInvalid
			return n, false
		}
		switch tr := transitions[t.state]; {
		case tr[0].mask&c != 0:
			t.state = tr[0].next
		case tr[1].mask&c != 0:
			t.state = tr[1].next
		default:
			t.state = ruleInvalid
			// Only a Bidi domain name can actually violate the rule.
			if t.isRTL() {
				return n, false
			}
		}
		n += sz
	}
	return n, true
}
|
||||
|
||||
// advanceString is the string counterpart of advance; it feeds s through the
// Bidi Rule state machine, returning the number of bytes consumed and whether
// the input so far can still be valid.
func (t *Transformer) advanceString(s string) (n int, ok bool) {
	var e bidi.Properties
	var sz int
	for n < len(s) {
		if s[n] < utf8.RuneSelf {
			// ASCII fast path via the precomputed table.
			e, sz = asciiTable[s[n]], 1
		} else {
			e, sz = bidi.LookupString(s[n:])
			if sz <= 1 {
				if sz == 1 {
					return n, false // invalid UTF-8
				}
				return n, true // incomplete UTF-8 encoding
			}
		}
		// TODO: using CompactClass results in noticeable speedup.
		// See unicode/bidi/prop.go:Properties.CompactClass.
		c := uint16(1 << e.Class())
		t.seen |= c
		// [2.4] EN and AN are mutually exclusive in an RTL label.
		if t.seen&exclusiveRTL == exclusiveRTL {
			t.state = ruleInvalid
			return n, false
		}
		switch tr := transitions[t.state]; {
		case tr[0].mask&c != 0:
			t.state = tr[0].next
		case tr[1].mask&c != 0:
			t.state = tr[1].next
		default:
			t.state = ruleInvalid
			// Only a Bidi domain name can actually violate the rule.
			if t.isRTL() {
				return n, false
			}
		}
		n += sz
	}
	return n, true
}
|
||||
705
vendor/golang.org/x/text/transform/transform.go
generated
vendored
Normal file
705
vendor/golang.org/x/text/transform/transform.go
generated
vendored
Normal file
@@ -0,0 +1,705 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package transform provides reader and writer wrappers that transform the
|
||||
// bytes passing through as well as various transformations. Example
|
||||
// transformations provided by other packages include normalization and
|
||||
// conversion between character sets.
|
||||
package transform // import "golang.org/x/text/transform"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"io"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
var (
|
||||
// ErrShortDst means that the destination buffer was too short to
|
||||
// receive all of the transformed bytes.
|
||||
ErrShortDst = errors.New("transform: short destination buffer")
|
||||
|
||||
// ErrShortSrc means that the source buffer has insufficient data to
|
||||
// complete the transformation.
|
||||
ErrShortSrc = errors.New("transform: short source buffer")
|
||||
|
||||
// ErrEndOfSpan means that the input and output (the transformed input)
|
||||
// are not identical.
|
||||
ErrEndOfSpan = errors.New("transform: input and output are not identical")
|
||||
|
||||
// errInconsistentByteCount means that Transform returned success (nil
|
||||
// error) but also returned nSrc inconsistent with the src argument.
|
||||
errInconsistentByteCount = errors.New("transform: inconsistent byte count returned")
|
||||
|
||||
// errShortInternal means that an internal buffer is not large enough
|
||||
// to make progress and the Transform operation must be aborted.
|
||||
errShortInternal = errors.New("transform: short internal buffer")
|
||||
)
|
||||
|
||||
// Transformer transforms bytes.
type Transformer interface {
	// Transform writes to dst the transformed bytes read from src, and
	// returns the number of dst bytes written and src bytes read. The
	// atEOF argument tells whether src represents the last bytes of the
	// input.
	//
	// Callers should always process the nDst bytes produced and account
	// for the nSrc bytes consumed before considering the error err.
	//
	// A nil error means that all of the transformed bytes (whether freshly
	// transformed from src or left over from previous Transform calls)
	// were written to dst. A nil error can be returned regardless of
	// whether atEOF is true. If err is nil then nSrc must equal len(src);
	// the converse is not necessarily true.
	//
	// ErrShortDst means that dst was too short to receive all of the
	// transformed bytes. ErrShortSrc means that src had insufficient data
	// to complete the transformation. If both conditions apply, then
	// either error may be returned. Other than the error conditions listed
	// here, implementations are free to report other errors that arise.
	Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error)

	// Reset resets the state and allows a Transformer to be reused.
	Reset()
}
|
||||
|
||||
// SpanningTransformer extends the Transformer interface with a Span method
// that determines how much of the input already conforms to the Transformer.
type SpanningTransformer interface {
	Transformer

	// Span returns a position in src such that transforming src[:n] results in
	// identical output src[:n] for these bytes. It does not necessarily return
	// the largest such n. The atEOF argument tells whether src represents the
	// last bytes of the input.
	//
	// Callers should always account for the n bytes consumed before
	// considering the error err.
	//
	// A nil error means that all input bytes are known to be identical to the
	// output produced by the Transformer. A nil error can be returned
	// regardless of whether atEOF is true. If err is nil, then n must
	// equal len(src); the converse is not necessarily true.
	//
	// ErrEndOfSpan means that the Transformer output may differ from the
	// input after n bytes. Note that n may be len(src), meaning that the output
	// would contain additional bytes after otherwise identical output.
	// ErrShortSrc means that src had insufficient data to determine whether the
	// remaining bytes would change. Other than the error conditions listed
	// here, implementations are free to report other errors that arise.
	//
	// Calling Span can modify the Transformer state as a side effect. In
	// effect, it does the transformation just as calling Transform would, only
	// without copying to a destination buffer and only up to a point it can
	// determine the input and output bytes are the same. This is obviously more
	// limited than calling Transform, but can be more efficient in terms of
	// copying and allocating buffers. Calls to Span and Transform may be
	// interleaved.
	Span(src []byte, atEOF bool) (n int, err error)
}
|
||||
|
||||
// NopResetter can be embedded by implementations of Transformer to add a nop
// Reset method.
type NopResetter struct{}

// Reset implements the Reset method of the Transformer interface.
func (NopResetter) Reset() {}
|
||||
|
||||
// Reader wraps another io.Reader by transforming the bytes read.
type Reader struct {
	r   io.Reader
	t   Transformer
	err error // sticky error from r or t; reported once dst is drained

	// dst[dst0:dst1] contains bytes that have been transformed by t but
	// not yet copied out via Read.
	dst        []byte
	dst0, dst1 int

	// src[src0:src1] contains bytes that have been read from r but not
	// yet transformed through t.
	src        []byte
	src0, src1 int

	// transformComplete is whether the transformation is complete,
	// regardless of whether or not it was successful.
	transformComplete bool
}
|
||||
|
||||
// defaultBufSize is the size of the internal src and dst buffers used by
// Reader, Writer, and the intermediate buffers of Chain.
const defaultBufSize = 4096
||||
|
||||
// NewReader returns a new Reader that wraps r by transforming the bytes read
|
||||
// via t. It calls Reset on t.
|
||||
func NewReader(r io.Reader, t Transformer) *Reader {
|
||||
t.Reset()
|
||||
return &Reader{
|
||||
r: r,
|
||||
t: t,
|
||||
dst: make([]byte, defaultBufSize),
|
||||
src: make([]byte, defaultBufSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Read implements the io.Reader interface. It repeatedly drains transformed
// bytes into p, and otherwise transforms buffered source bytes or reads more
// from the underlying reader.
func (r *Reader) Read(p []byte) (int, error) {
	n, err := 0, error(nil)
	for {
		// Copy out any transformed bytes and return the final error if we are done.
		if r.dst0 != r.dst1 {
			n = copy(p, r.dst[r.dst0:r.dst1])
			r.dst0 += n
			if r.dst0 == r.dst1 && r.transformComplete {
				return n, r.err
			}
			return n, nil
		} else if r.transformComplete {
			return 0, r.err
		}

		// Try to transform some source bytes, or to flush the transformer if we
		// are out of source bytes. We do this even if r.r.Read returned an error.
		// As the io.Reader documentation says, "process the n > 0 bytes returned
		// before considering the error".
		if r.src0 != r.src1 || r.err != nil {
			r.dst0 = 0
			// atEOF only when the underlying reader reported io.EOF.
			r.dst1, n, err = r.t.Transform(r.dst, r.src[r.src0:r.src1], r.err == io.EOF)
			r.src0 += n

			switch {
			case err == nil:
				// A nil Transform error requires all of src to be consumed.
				if r.src0 != r.src1 {
					r.err = errInconsistentByteCount
				}
				// The Transform call was successful; we are complete if we
				// cannot read more bytes into src.
				r.transformComplete = r.err != nil
				continue
			case err == ErrShortDst && (r.dst1 != 0 || n != 0):
				// Make room in dst by copying out, and try again.
				continue
			case err == ErrShortSrc && r.src1-r.src0 != len(r.src) && r.err == nil:
				// Read more bytes into src via the code below, and try again.
			default:
				r.transformComplete = true
				// The reader error (r.err) takes precedence over the
				// transformer error (err) unless r.err is nil or io.EOF.
				if r.err == nil || r.err == io.EOF {
					r.err = err
				}
				continue
			}
		}

		// Move any untransformed source bytes to the start of the buffer
		// and read more bytes.
		if r.src0 != 0 {
			r.src0, r.src1 = 0, copy(r.src, r.src[r.src0:r.src1])
		}
		n, r.err = r.r.Read(r.src[r.src1:])
		r.src1 += n
	}
}
|
||||
|
||||
// TODO: implement ReadByte (and ReadRune??).
|
||||
|
||||
// Writer wraps another io.Writer by transforming the bytes read.
// The user needs to call Close to flush unwritten bytes that may
// be buffered.
type Writer struct {
	w   io.Writer
	t   Transformer
	dst []byte // scratch buffer for transformed output

	// src[:n] contains bytes that have not yet passed through t.
	src []byte
	n   int
}
|
||||
|
||||
// NewWriter returns a new Writer that wraps w by transforming the bytes written
|
||||
// via t. It calls Reset on t.
|
||||
func NewWriter(w io.Writer, t Transformer) *Writer {
|
||||
t.Reset()
|
||||
return &Writer{
|
||||
w: w,
|
||||
t: t,
|
||||
dst: make([]byte, defaultBufSize),
|
||||
src: make([]byte, defaultBufSize),
|
||||
}
|
||||
}
|
||||
|
||||
// Write implements the io.Writer interface. If there are not enough
// bytes available to complete a Transform, the bytes will be buffered
// for the next write. Call Close to convert the remaining bytes.
func (w *Writer) Write(data []byte) (n int, err error) {
	src := data
	if w.n > 0 {
		// Append bytes from data to the last remainder.
		// TODO: limit the amount copied on first try.
		n = copy(w.src[w.n:], data)
		w.n += n
		src = w.src[:w.n]
	}
	for {
		nDst, nSrc, err := w.t.Transform(w.dst, src, false)
		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
			return n, werr
		}
		src = src[nSrc:]
		if w.n == 0 {
			n += nSrc
		} else if len(src) <= n {
			// Enough bytes from w.src have been consumed. We make src point
			// to data instead to reduce the copying.
			w.n = 0
			n -= len(src)
			src = data[n:]
			if n < len(data) && (err == nil || err == ErrShortSrc) {
				continue
			}
		}
		switch err {
		case ErrShortDst:
			// This error is okay as long as we are making progress.
			if nDst > 0 || nSrc > 0 {
				continue
			}
		case ErrShortSrc:
			if len(src) < len(w.src) {
				// Buffer the remainder for the next Write or Close call.
				m := copy(w.src, src)
				// If w.n > 0, bytes from data were already copied to w.src and n
				// was already set to the number of bytes consumed.
				if w.n == 0 {
					n += m
				}
				w.n = m
				err = nil
			} else if nDst > 0 || nSrc > 0 {
				// Not enough buffer to store the remainder. Keep processing as
				// long as there is progress. Without this case, transforms that
				// require a lookahead larger than the buffer may result in an
				// error. This is not something one may expect to be common in
				// practice, but it may occur when buffers are set to small
				// sizes during testing.
				continue
			}
		case nil:
			// Transform consumed all of src, so nothing may remain buffered.
			if w.n > 0 {
				err = errInconsistentByteCount
			}
		}
		return n, err
	}
}
|
||||
|
||||
// Close implements the io.Closer interface. It flushes the buffered remainder
// through the transformer with atEOF set, retrying while the destination
// buffer is too short.
func (w *Writer) Close() error {
	src := w.src[:w.n]
	for {
		nDst, nSrc, err := w.t.Transform(w.dst, src, true)
		if _, werr := w.w.Write(w.dst[:nDst]); werr != nil {
			return werr
		}
		if err != ErrShortDst {
			return err
		}
		src = src[nSrc:]
	}
}
|
||||
|
||||
type nop struct{ NopResetter }
|
||||
|
||||
func (nop) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
|
||||
n := copy(dst, src)
|
||||
if n < len(src) {
|
||||
err = ErrShortDst
|
||||
}
|
||||
return n, n, err
|
||||
}
|
||||
|
||||
func (nop) Span(src []byte, atEOF bool) (n int, err error) {
|
||||
return len(src), nil
|
||||
}
|
||||
|
||||
// discard consumes all input and produces no output.
type discard struct{ NopResetter }

// Transform consumes all of src and writes nothing, never failing.
func (discard) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	return 0, len(src), nil
}
|
||||
|
||||
var (
	// Discard is a Transformer for which all Transform calls succeed
	// by consuming all bytes and writing nothing.
	Discard Transformer = discard{}

	// Nop is a SpanningTransformer that copies src to dst.
	Nop SpanningTransformer = nop{}
)
|
||||
|
||||
// chain is a sequence of links. A chain with N Transformers has N+1 links and
// N+1 buffers. Of those N+1 buffers, the first and last are the src and dst
// buffers given to chain.Transform and the middle N-1 buffers are intermediate
// buffers owned by the chain. The i'th link transforms bytes from the i'th
// buffer chain.link[i].b at read offset chain.link[i].p to the i+1'th buffer
// chain.link[i+1].b at write offset chain.link[i+1].n, for i in [0, N).
type chain struct {
	link []link
	err  error
	// errStart is the index at which the error occurred plus 1. Processing
	// resumes at this level at the next call to Transform. As long as
	// errStart > 0, chain will not consume any more source bytes.
	errStart int
}
|
||||
|
||||
func (c *chain) fatalError(errIndex int, err error) {
|
||||
if i := errIndex + 1; i > c.errStart {
|
||||
c.errStart = i
|
||||
c.err = err
|
||||
}
|
||||
}
|
||||
|
||||
// link is one stage of a chain: a Transformer together with its input buffer.
type link struct {
	t Transformer
	// b[p:n] holds the bytes to be transformed by t.
	b []byte
	p int
	n int
}

// src returns the pending, not-yet-transformed bytes of this link.
func (l *link) src() []byte {
	return l.b[l.p:l.n]
}

// dst returns the free space of this link's buffer, used as the output of the
// previous link's Transformer.
func (l *link) dst() []byte {
	return l.b[l.n:]
}
|
||||
|
||||
// Chain returns a Transformer that applies t in sequence.
|
||||
func Chain(t ...Transformer) Transformer {
|
||||
if len(t) == 0 {
|
||||
return nop{}
|
||||
}
|
||||
c := &chain{link: make([]link, len(t)+1)}
|
||||
for i, tt := range t {
|
||||
c.link[i].t = tt
|
||||
}
|
||||
// Allocate intermediate buffers.
|
||||
b := make([][defaultBufSize]byte, len(t)-1)
|
||||
for i := range b {
|
||||
c.link[i+1].b = b[i][:]
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// Reset resets the state of Chain. It calls Reset on all the Transformers.
|
||||
func (c *chain) Reset() {
|
||||
for i, l := range c.link {
|
||||
if l.t != nil {
|
||||
l.t.Reset()
|
||||
}
|
||||
c.link[i].p, c.link[i].n = 0, 0
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: make chain use Span (is going to be fun to implement!)
|
||||
|
||||
// Transform applies the transformers of c in sequence.
func (c *chain) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	// Set up src and dst in the chain.
	srcL := &c.link[0]
	dstL := &c.link[len(c.link)-1]
	srcL.b, srcL.p, srcL.n = src, 0, len(src)
	dstL.b, dstL.n = dst, 0
	var lastFull, needProgress bool // for detecting progress

	// i is the index of the next Transformer to apply, for i in [low, high].
	// low is the lowest index for which c.link[low] may still produce bytes.
	// high is the highest index for which c.link[high] has a Transformer.
	// The error returned by Transform determines whether to increase or
	// decrease i. We try to completely fill a buffer before converting it.
	for low, i, high := c.errStart, c.errStart, len(c.link)-2; low <= i && i <= high; {
		in, out := &c.link[i], &c.link[i+1]
		nDst, nSrc, err0 := in.t.Transform(out.dst(), in.src(), atEOF && low == i)
		out.n += nDst
		in.p += nSrc
		// Reset a fully-drained intermediate buffer (never the caller's src at i==0).
		if i > 0 && in.p == in.n {
			in.p, in.n = 0, 0
		}
		needProgress, lastFull = lastFull, false
		switch err0 {
		case ErrShortDst:
			// Process the destination buffer next. Return if we are already
			// at the high index.
			if i == high {
				return dstL.n, srcL.p, ErrShortDst
			}
			if out.n != 0 {
				i++
				// If the Transformer at the next index is not able to process any
				// source bytes there is nothing that can be done to make progress
				// and the bytes will remain unprocessed. lastFull is used to
				// detect this and break out of the loop with a fatal error.
				lastFull = true
				continue
			}
			// The destination buffer was too small, but is completely empty.
			// Return a fatal error as this transformation can never complete.
			c.fatalError(i, errShortInternal)
		case ErrShortSrc:
			if i == 0 {
				// Save ErrShortSrc in err. All other errors take precedence.
				err = ErrShortSrc
				break
			}
			// Source bytes were depleted before filling up the destination buffer.
			// Verify we made some progress, move the remaining bytes to the errStart
			// and try to get more source bytes.
			if needProgress && nSrc == 0 || in.n-in.p == len(in.b) {
				// There were not enough source bytes to proceed while the source
				// buffer cannot hold any more bytes. Return a fatal error as this
				// transformation can never complete.
				c.fatalError(i, errShortInternal)
				break
			}
			// in.b is an internal buffer and we can make progress.
			in.p, in.n = 0, copy(in.b, in.src())
			fallthrough
		case nil:
			// if i == low, we have depleted the bytes at index i or any lower levels.
			// In that case we increase low and i. In all other cases we decrease i to
			// fetch more bytes before proceeding to the next index.
			if i > low {
				i--
				continue
			}
		default:
			c.fatalError(i, err0)
		}
		// Exhausted level low or fatal error: increase low and continue
		// to process the bytes accepted so far.
		i++
		low = i
	}

	// If c.errStart > 0, this means we found a fatal error. We will clear
	// all upstream buffers. At this point, no more progress can be made
	// downstream, as Transform would have bailed while handling ErrShortDst.
	if c.errStart > 0 {
		for i := 1; i < c.errStart; i++ {
			c.link[i].p, c.link[i].n = 0, 0
		}
		err, c.errStart, c.err = c.err, 0, nil
	}
	return dstL.n, srcL.p, err
}
|
||||
|
||||
// RemoveFunc returns a Transformer that removes runes r for which f(r) is true.
//
// Deprecated: use runes.Remove instead.
func RemoveFunc(f func(r rune) bool) Transformer {
	return removeF(f)
}

// removeF adapts a rune predicate into a Transformer.
type removeF func(r rune) bool

// Reset implements the Reset method of the Transformer interface; removeF is
// stateless, so this is a no-op.
func (removeF) Reset() {}
||||
|
||||
// Transform implements the Transformer interface. It copies src to dst rune by
// rune, dropping runes for which t(r) is true and replacing invalid UTF-8
// bytes with U+FFFD (unless the replacement itself would be removed).
func (t removeF) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for r, sz := rune(0), 0; len(src) > 0; src = src[sz:] {

		// ASCII fast path: a single byte below RuneSelf is its own rune.
		if r = rune(src[0]); r < utf8.RuneSelf {
			sz = 1
		} else {
			r, sz = utf8.DecodeRune(src)

			if sz == 1 {
				// Invalid rune.
				if !atEOF && !utf8.FullRune(src) {
					// The bytes may form a valid rune once more input arrives.
					err = ErrShortSrc
					break
				}
				// We replace illegal bytes with RuneError. Not doing so might
				// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
				// The resulting byte sequence may subsequently contain runes
				// for which t(r) is true that were passed unnoticed.
				if !t(r) {
					if nDst+3 > len(dst) {
						// U+FFFD encodes to 3 bytes; not enough room.
						err = ErrShortDst
						break
					}
					nDst += copy(dst[nDst:], "\uFFFD")
				}
				nSrc++
				continue
			}
		}

		if !t(r) {
			if nDst+sz > len(dst) {
				err = ErrShortDst
				break
			}
			nDst += copy(dst[nDst:], src[:sz])
		}
		nSrc += sz
	}
	return
}
|
||||
|
||||
// grow returns a new []byte that is longer than b, and copies the first n bytes
// of b to the start of the new slice.
func grow(b []byte, n int) []byte {
	size := len(b)
	switch {
	case size <= 32:
		// Small buffers jump straight to 64 bytes.
		size = 64
	case size <= 256:
		// Medium buffers double.
		size = 2 * size
	default:
		// Large buffers grow by 50% to limit over-allocation.
		size += size >> 1
	}
	out := make([]byte, size)
	copy(out, b[:n])
	return out
}
|
||||
|
||||
// initialBufSize is the starting chunk size used by String before any growth.
const initialBufSize = 128

// String returns a string with the result of converting s[:n] using t, where
// n <= len(s). If err == nil, n will be len(s). It calls Reset on t.
func String(t Transformer, s string) (result string, n int, err error) {
	t.Reset()
	if s == "" {
		// Fast path for the common case for empty input. Results in about a
		// 86% reduction of running time for BenchmarkStringLowerEmpty.
		if _, _, err := t.Transform(nil, nil, true); err == nil {
			return "", 0, nil
		}
	}

	// Allocate only once. Note that both dst and src escape when passed to
	// Transform.
	buf := [2 * initialBufSize]byte{}
	dst := buf[:initialBufSize:initialBufSize]
	src := buf[initialBufSize : 2*initialBufSize]

	// The input string s is transformed in multiple chunks (starting with a
	// chunk size of initialBufSize). nDst and nSrc are per-chunk (or
	// per-Transform-call) indexes, pDst and pSrc are overall indexes.
	nDst, nSrc := 0, 0
	pDst, pSrc := 0, 0

	// pPrefix is the length of a common prefix: the first pPrefix bytes of the
	// result will equal the first pPrefix bytes of s. It is not guaranteed to
	// be the largest such value, but if pPrefix, len(result) and len(s) are
	// all equal after the final transform (i.e. calling Transform with atEOF
	// being true returned nil error) then we don't need to allocate a new
	// result string.
	pPrefix := 0
	for {
		// Invariant: pDst == pPrefix && pSrc == pPrefix.

		n := copy(src, s[pSrc:])
		nDst, nSrc, err = t.Transform(dst, src[:n], pSrc+n == len(s))
		pDst += nDst
		pSrc += nSrc

		// TODO: let transformers implement an optional Spanner interface, akin
		// to norm's QuickSpan. This would even allow us to avoid any allocation.
		if !bytes.Equal(dst[:nDst], src[:nSrc]) {
			break
		}
		pPrefix = pSrc
		if err == ErrShortDst {
			// A buffer can only be short if a transformer modifies its input.
			break
		} else if err == ErrShortSrc {
			if nSrc == 0 {
				// No progress was made.
				break
			}
			// Equal so far and !atEOF, so continue checking.
		} else if err != nil || pPrefix == len(s) {
			// The input is its own transform; return it without allocation.
			return string(s[:pPrefix]), pPrefix, err
		}
	}
	// Post-condition: pDst == pPrefix + nDst && pSrc == pPrefix + nSrc.

	// We have transformed the first pSrc bytes of the input s to become pDst
	// transformed bytes. Those transformed bytes are discontiguous: the first
	// pPrefix of them equal s[:pPrefix] and the last nDst of them equal
	// dst[:nDst]. We copy them around, into a new dst buffer if necessary, so
	// that they become one contiguous slice: dst[:pDst].
	if pPrefix != 0 {
		newDst := dst
		if pDst > len(newDst) {
			newDst = make([]byte, len(s)+nDst-nSrc)
		}
		copy(newDst[pPrefix:pDst], dst[:nDst])
		copy(newDst[:pPrefix], s[:pPrefix])
		dst = newDst
	}

	// Prevent duplicate Transform calls with atEOF being true at the end of
	// the input. Also return if we have an unrecoverable error.
	if (err == nil && pSrc == len(s)) ||
		(err != nil && err != ErrShortDst && err != ErrShortSrc) {
		return string(dst[:pDst]), pSrc, err
	}

	// Transform the remaining input, growing dst and src buffers as necessary.
	for {
		n := copy(src, s[pSrc:])
		nDst, nSrc, err := t.Transform(dst[pDst:], src[:n], pSrc+n == len(s))
		pDst += nDst
		pSrc += nSrc

		// If we got ErrShortDst or ErrShortSrc, do not grow as long as we can
		// make progress. This may avoid excessive allocations.
		if err == ErrShortDst {
			if nDst == 0 {
				dst = grow(dst, pDst)
			}
		} else if err == ErrShortSrc {
			if nSrc == 0 {
				src = grow(src, 0)
			}
		} else if err != nil || pSrc == len(s) {
			return string(dst[:pDst]), pSrc, err
		}
	}
}
|
||||
|
||||
// Bytes returns a new byte slice with the result of converting b[:n] using t,
// where n <= len(b). If err == nil, n will be len(b). It calls Reset on t.
func Bytes(t Transformer, b []byte) (result []byte, n int, err error) {
	// Start with a destination the same size as the input; doAppend grows it
	// as needed.
	return doAppend(t, 0, make([]byte, len(b)), b)
}
|
||||
|
||||
// Append appends the result of converting src[:n] using t to dst, where
// n <= len(src), If err == nil, n will be len(src). It calls Reset on t.
func Append(t Transformer, dst, src []byte) (result []byte, n int, err error) {
	if len(dst) == cap(dst) {
		// No spare capacity: allocate a buffer with room for src and copy
		// dst's contents over so doAppend can write past them.
		n := len(src) + len(dst) // It is okay for this to be 0.
		b := make([]byte, n)
		dst = b[:copy(b, dst)]
	}
	return doAppend(t, len(dst), dst[:cap(dst)], src)
}
|
||||
|
||||
func doAppend(t Transformer, pDst int, dst, src []byte) (result []byte, n int, err error) {
|
||||
t.Reset()
|
||||
pSrc := 0
|
||||
for {
|
||||
nDst, nSrc, err := t.Transform(dst[pDst:], src[pSrc:], true)
|
||||
pDst += nDst
|
||||
pSrc += nSrc
|
||||
if err != ErrShortDst {
|
||||
return dst[:pDst], pSrc, err
|
||||
}
|
||||
|
||||
// Grow the destination buffer, but do not grow as long as we can make
|
||||
// progress. This may avoid excessive allocations.
|
||||
if nDst == 0 {
|
||||
dst = grow(dst, pDst)
|
||||
}
|
||||
}
|
||||
}
|
||||
198
vendor/golang.org/x/text/unicode/bidi/bidi.go
generated
vendored
Normal file
198
vendor/golang.org/x/text/unicode/bidi/bidi.go
generated
vendored
Normal file
@@ -0,0 +1,198 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run gen.go gen_trieval.go gen_ranges.go
|
||||
|
||||
// Package bidi contains functionality for bidirectional text support.
|
||||
//
|
||||
// See http://www.unicode.org/reports/tr9.
|
||||
//
|
||||
// NOTE: UNDER CONSTRUCTION. This API may change in backwards incompatible ways
|
||||
// and without notice.
|
||||
package bidi // import "golang.org/x/text/unicode/bidi"
|
||||
|
||||
// TODO:
|
||||
// The following functionality would not be hard to implement, but hinges on
|
||||
// the definition of a Segmenter interface. For now this is up to the user.
|
||||
// - Iterate over paragraphs
|
||||
// - Segmenter to iterate over runs directly from a given text.
|
||||
// Also:
|
||||
// - Transformer for reordering?
|
||||
// - Transformer (validator, really) for Bidi Rule.
|
||||
|
||||
// This API tries to avoid dealing with embedding levels for now. Under the hood
|
||||
// these will be computed, but the question is to which extent the user should
|
||||
// know they exist. We should at some point allow the user to specify an
|
||||
// embedding hierarchy, though.
|
||||
|
||||
// A Direction indicates the overall flow of text.
type Direction int

const (
	// LeftToRight indicates the text contains no right-to-left characters and
	// that either there are some left-to-right characters or the option
	// DefaultDirection(LeftToRight) was passed.
	LeftToRight Direction = iota

	// RightToLeft indicates the text contains no left-to-right characters and
	// that either there are some right-to-left characters or the option
	// DefaultDirection(RightToLeft) was passed.
	RightToLeft

	// Mixed indicates text contains both left-to-right and right-to-left
	// characters.
	Mixed

	// Neutral means that text contains no left-to-right and right-to-left
	// characters and that no default direction has been set.
	Neutral
)

// options holds configuration set by Option values. Currently empty.
type options struct{}

// An Option is an option for Bidi processing.
type Option func(*options)
|
||||
|
||||
// ICU allows the user to define embedding levels. This may be used, for example,
|
||||
// to use hierarchical structure of markup languages to define embeddings.
|
||||
// The following option may be a way to expose this functionality in this API.
|
||||
// // LevelFunc sets a function that associates nesting levels with the given text.
|
||||
// // The levels function will be called with monotonically increasing values for p.
|
||||
// func LevelFunc(levels func(p int) int) Option {
|
||||
// panic("unimplemented")
|
||||
// }
|
||||
|
||||
// DefaultDirection sets the default direction for a Paragraph. The direction is
// overridden if the text contains directional characters.
//
// Not yet implemented: always panics.
func DefaultDirection(d Direction) Option {
	panic("unimplemented")
}

// A Paragraph holds a single Paragraph for Bidi processing.
type Paragraph struct {
	// buffers
}
|
||||
|
||||
// SetBytes configures p for the given paragraph text. It replaces text
// previously set by SetBytes or SetString. If b contains a paragraph separator
// it will only process the first paragraph and report the number of bytes
// consumed from b including this separator. Error may be non-nil if options are
// given.
//
// Not yet implemented: always panics.
func (p *Paragraph) SetBytes(b []byte, opts ...Option) (n int, err error) {
	panic("unimplemented")
}

// SetString configures p for the given paragraph text. It replaces text
// previously set by SetBytes or SetString. If s contains a paragraph separator
// it will only process the first paragraph and report the number of bytes
// consumed from s including this separator. Error may be non-nil if options are
// given.
//
// Not yet implemented: always panics.
func (p *Paragraph) SetString(s string, opts ...Option) (n int, err error) {
	panic("unimplemented")
}

// IsLeftToRight reports whether the principal direction of rendering for this
// paragraph is left-to-right. If this returns false, the principal direction
// of rendering is right-to-left.
//
// Not yet implemented: always panics.
func (p *Paragraph) IsLeftToRight() bool {
	panic("unimplemented")
}

// Direction returns the direction of the text of this paragraph.
//
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
//
// Not yet implemented: always panics.
func (p *Paragraph) Direction() Direction {
	panic("unimplemented")
}

// RunAt reports the Run at the given position of the input text.
//
// This method can be used for computing line breaks on paragraphs.
//
// Not yet implemented: always panics.
func (p *Paragraph) RunAt(pos int) Run {
	panic("unimplemented")
}

// Order computes the visual ordering of all the runs in a Paragraph.
//
// Not yet implemented: always panics.
func (p *Paragraph) Order() (Ordering, error) {
	panic("unimplemented")
}

// Line computes the visual ordering of runs for a single line starting and
// ending at the given positions in the original text.
//
// Not yet implemented: always panics.
func (p *Paragraph) Line(start, end int) (Ordering, error) {
	panic("unimplemented")
}
|
||||
|
||||
// An Ordering holds the computed visual order of runs of a Paragraph. Calling
// SetBytes or SetString on the originating Paragraph invalidates an Ordering.
// The methods of an Ordering should only be called by one goroutine at a time.
type Ordering struct{}

// Direction reports the directionality of the runs.
//
// The direction may be LeftToRight, RightToLeft, Mixed, or Neutral.
//
// Not yet implemented: always panics.
func (o *Ordering) Direction() Direction {
	panic("unimplemented")
}

// NumRuns returns the number of runs.
//
// Not yet implemented: always panics.
func (o *Ordering) NumRuns() int {
	panic("unimplemented")
}

// Run returns the ith run within the ordering.
//
// Not yet implemented: always panics.
func (o *Ordering) Run(i int) Run {
	panic("unimplemented")
}
|
||||
|
||||
// TODO: perhaps with options.
|
||||
// // Reorder creates a reader that reads the runes in visual order per character.
|
||||
// // Modifiers remain after the runes they modify.
|
||||
// func (l *Runs) Reorder() io.Reader {
|
||||
// panic("unimplemented")
|
||||
// }
|
||||
|
||||
// A Run is a continuous sequence of characters of a single direction.
type Run struct {
}

// String returns the text of the run in its original order.
//
// Not yet implemented: always panics.
func (r *Run) String() string {
	panic("unimplemented")
}

// Bytes returns the text of the run in its original order.
//
// Not yet implemented: always panics.
func (r *Run) Bytes() []byte {
	panic("unimplemented")
}

// TODO: methods for
// - Display order
// - headers and footers
// - bracket replacement.

// Direction reports the direction of the run.
//
// Not yet implemented: always panics.
func (r *Run) Direction() Direction {
	panic("unimplemented")
}

// Pos reports the position of the Run within the text passed to SetBytes or
// SetString of the originating Paragraph value.
//
// Not yet implemented: always panics.
func (r *Run) Pos() (start, end int) {
	panic("unimplemented")
}

// AppendReverse reverses the order of characters of in, appends them to out,
// and returns the result. Modifiers will still follow the runes they modify.
// Brackets are replaced with their counterparts.
//
// Not yet implemented: always panics.
func AppendReverse(out, in []byte) []byte {
	panic("unimplemented")
}

// ReverseString reverses the order of characters in s and returns a new string.
// Modifiers will still follow the runes they modify. Brackets are replaced with
// their counterparts.
//
// Not yet implemented: always panics.
func ReverseString(s string) string {
	panic("unimplemented")
}
|
||||
335
vendor/golang.org/x/text/unicode/bidi/bracket.go
generated
vendored
Normal file
335
vendor/golang.org/x/text/unicode/bidi/bracket.go
generated
vendored
Normal file
@@ -0,0 +1,335 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bidi
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// This file contains a port of the reference implementation of the
|
||||
// Bidi Parentheses Algorithm:
|
||||
// http://www.unicode.org/Public/PROGRAMS/BidiReferenceJava/BidiPBAReference.java
|
||||
//
|
||||
// The implementation in this file covers definitions BD14-BD16 and rule N0
|
||||
// of UAX#9.
|
||||
//
|
||||
// Some preprocessing is done for each rune before data is passed to this
|
||||
// algorithm:
|
||||
// - opening and closing brackets are identified
|
||||
// - a bracket pair type, like '(' and ')' is assigned a unique identifier that
|
||||
// is identical for the opening and closing bracket. It is left to do these
|
||||
// mappings.
|
||||
// - The BPA algorithm requires that bracket characters that are canonical
|
||||
// equivalents of each other be able to be substituted for each other.
|
||||
// It is the responsibility of the caller to do this canonicalization.
|
||||
//
|
||||
// In implementing BD16, this implementation departs slightly from the "logical"
|
||||
// algorithm defined in UAX#9. In particular, the stack referenced there
|
||||
// supports operations that go beyond a "basic" stack. An equivalent
|
||||
// implementation based on a linked list is used here.
|
||||
|
||||
// Bidi_Paired_Bracket_Type
// BD14. An opening paired bracket is a character whose
// Bidi_Paired_Bracket_Type property value is Open.
//
// BD15. A closing paired bracket is a character whose
// Bidi_Paired_Bracket_Type property value is Close.
//
// bracketType encodes that property for a single rune.
type bracketType byte

const (
	bpNone  bracketType = iota // not a paired bracket
	bpOpen                     // opening paired bracket (BD14)
	bpClose                    // closing paired bracket (BD15)
)
|
||||
|
||||
// bracketPair records the index positions of the opening and closing
// characters of one matched bracket pair within an isolating run sequence.
type bracketPair struct {
	opener int // position of the opening bracket
	closer int // position of the matching closing bracket
}

// String renders the pair as "(opener, closer)" for debugging output.
func (b *bracketPair) String() string {
	return fmt.Sprintf("(%d, %d)", b.opener, b.closer)
}
|
||||
|
||||
// bracketPairs is a slice of bracketPairs with a sort.Interface implementation.
|
||||
type bracketPairs []bracketPair
|
||||
|
||||
func (b bracketPairs) Len() int { return len(b) }
|
||||
func (b bracketPairs) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b bracketPairs) Less(i, j int) bool { return b[i].opener < b[j].opener }
|
||||
|
||||
// resolvePairedBrackets runs the paired bracket part of the UBA algorithm.
//
// For each rune, it takes the indexes into the original string, the class the
// bracket type (in pairTypes) and the bracket identifier (pairValues). It also
// takes the direction type for the start-of-sentence and the embedding level.
//
// The identifiers for bracket types are the rune of the canonicalized opening
// bracket for brackets (open or close) or 0 for runes that are not brackets.
func resolvePairedBrackets(s *isolatingRunSequence) {
	p := bracketPairer{
		sos:              s.sos,
		openers:          list.New(),
		codesIsolatedRun: s.types,
		indexes:          s.indexes,
	}
	// An odd embedding level means the sequence is right-to-left.
	dirEmbed := L
	if s.level&1 != 0 {
		dirEmbed = R
	}
	// BD16: find the bracket pairs, then apply rule N0 to each of them.
	p.locateBrackets(s.p.pairTypes, s.p.pairValues)
	p.resolveBrackets(dirEmbed, s.p.initialTypes)
}
|
||||
|
||||
// bracketPairer holds the working state for locating and resolving bracket
// pairs (BD16 and rule N0) within one isolating run sequence.
type bracketPairer struct {
	sos Class // direction corresponding to start of sequence

	// The following is a restatement of BD 16 using non-algorithmic language.
	//
	// A bracket pair is a pair of characters consisting of an opening
	// paired bracket and a closing paired bracket such that the
	// Bidi_Paired_Bracket property value of the former equals the latter,
	// subject to the following constraints.
	// - both characters of a pair occur in the same isolating run sequence
	// - the closing character of a pair follows the opening character
	// - any bracket character can belong at most to one pair, the earliest possible one
	// - any bracket character not part of a pair is treated like an ordinary character
	// - pairs may nest properly, but their spans may not overlap otherwise

	// Bracket characters with canonical decompositions are supposed to be
	// treated as if they had been normalized, to allow normalized and non-
	// normalized text to give the same result. In this implementation that step
	// is pushed out to the caller. The caller has to ensure that the pairValue
	// slices contain the rune of the opening bracket after normalization for
	// any opening or closing bracket.

	openers *list.List // list of positions for opening brackets

	// bracket pair positions sorted by location of opening bracket
	pairPositions bracketPairs

	codesIsolatedRun []Class // directional bidi codes for an isolated run
	indexes          []int   // array of index values into the original string

}
|
||||
|
||||
// matchOpener reports whether characters at given positions form a matching
// bracket pair. Both positions are compared by their canonicalized opening
// bracket rune in pairValues (see resolvePairedBrackets).
func (p *bracketPairer) matchOpener(pairValues []rune, opener, closer int) bool {
	return pairValues[p.indexes[opener]] == pairValues[p.indexes[closer]]
}
|
||||
|
||||
// maxPairingDepth is the maximum nesting depth of open brackets tracked by
// locateBrackets; once reached, bracket pairing is abandoned for the run.
const maxPairingDepth = 63
|
||||
|
||||
// locateBrackets locates matching bracket pairs according to BD16.
//
// This implementation uses a linked list instead of a stack, because, while
// elements are added at the front (like a push) they are not generally removed
// in atomic 'pop' operations, reducing the benefit of the stack archetype.
func (p *bracketPairer) locateBrackets(pairTypes []bracketType, pairValues []rune) {
	// traverse the run
	// do that explicitly (not in a for-each) so we can record position
	for i, index := range p.indexes {

		// look at the bracket type for each character
		if pairTypes[index] == bpNone || p.codesIsolatedRun[i] != ON {
			// continue scanning
			continue
		}
		switch pairTypes[index] {
		case bpOpen:
			// check if maximum pairing depth reached
			if p.openers.Len() == maxPairingDepth {
				// give up on bracket pairing for this run entirely
				p.openers.Init()
				return
			}
			// remember opener location, most recent first
			p.openers.PushFront(i)

		case bpClose:
			// see if there is a match
			count := 0
			for elem := p.openers.Front(); elem != nil; elem = elem.Next() {
				count++
				opener := elem.Value.(int)
				if p.matchOpener(pairValues, opener, i) {
					// if the opener matches, add nested pair to the ordered list
					p.pairPositions = append(p.pairPositions, bracketPair{opener, i})
					// remove up to and including matched opener
					for ; count > 0; count-- {
						p.openers.Remove(p.openers.Front())
					}
					break
				}
			}
			// NOTE(review): the list is re-sorted after every closing
			// bracket; hoisting this sort to after the outer loop looks
			// equivalent and cheaper — confirm before changing vendored code.
			sort.Sort(p.pairPositions)
			// if we get here, the closing bracket matched no openers
			// and gets ignored
		}
	}
}
|
||||
|
||||
// Bracket pairs within an isolating run sequence are processed as units so
|
||||
// that both the opening and the closing paired bracket in a pair resolve to
|
||||
// the same direction.
|
||||
//
|
||||
// N0. Process bracket pairs in an isolating run sequence sequentially in
|
||||
// the logical order of the text positions of the opening paired brackets
|
||||
// using the logic given below. Within this scope, bidirectional types EN
|
||||
// and AN are treated as R.
|
||||
//
|
||||
// Identify the bracket pairs in the current isolating run sequence
|
||||
// according to BD16. For each bracket-pair element in the list of pairs of
|
||||
// text positions:
|
||||
//
|
||||
// a Inspect the bidirectional types of the characters enclosed within the
|
||||
// bracket pair.
|
||||
//
|
||||
// b If any strong type (either L or R) matching the embedding direction is
|
||||
// found, set the type for both brackets in the pair to match the embedding
|
||||
// direction.
|
||||
//
|
||||
// o [ e ] o -> o e e e o
|
||||
//
|
||||
// o [ o e ] -> o e o e e
|
||||
//
|
||||
// o [ NI e ] -> o e NI e e
|
||||
//
|
||||
// c Otherwise, if a strong type (opposite the embedding direction) is
|
||||
// found, test for adjacent strong types as follows: 1 First, check
|
||||
// backwards before the opening paired bracket until the first strong type
|
||||
// (L, R, or sos) is found. If that first preceding strong type is opposite
|
||||
// the embedding direction, then set the type for both brackets in the pair
|
||||
// to that type. 2 Otherwise, set the type for both brackets in the pair to
|
||||
// the embedding direction.
|
||||
//
|
||||
// o [ o ] e -> o o o o e
|
||||
//
|
||||
// o [ o NI ] o -> o o o NI o o
|
||||
//
|
||||
// e [ o ] o -> e e o e o
|
||||
//
|
||||
// e [ o ] e -> e e o e e
|
||||
//
|
||||
// e ( o [ o ] NI ) e -> e e o o o o NI e e
|
||||
//
|
||||
// d Otherwise, do not set the type for the current bracket pair. Note that
|
||||
// if the enclosed text contains no strong types the paired brackets will
|
||||
// both resolve to the same level when resolved individually using rules N1
|
||||
// and N2.
|
||||
//
|
||||
// e ( NI ) o -> e ( NI ) o
|
||||
|
||||
// getStrongTypeN0 maps character's directional code to strong type as required
|
||||
// by rule N0.
|
||||
//
|
||||
// TODO: have separate type for "strong" directionality.
|
||||
func (p *bracketPairer) getStrongTypeN0(index int) Class {
|
||||
switch p.codesIsolatedRun[index] {
|
||||
// in the scope of N0, number types are treated as R
|
||||
case EN, AN, AL, R:
|
||||
return R
|
||||
case L:
|
||||
return L
|
||||
default:
|
||||
return ON
|
||||
}
|
||||
}
|
||||
|
||||
// classifyPairContent reports the strong types contained inside a Bracket Pair,
// assuming the given embedding direction.
//
// It returns ON if no strong type is found. If a single strong type is found,
// it returns this type. Otherwise it returns the embedding direction.
//
// TODO: use separate type for "strong" directionality.
func (p *bracketPairer) classifyPairContent(loc bracketPair, dirEmbed Class) Class {
	dirOpposite := ON
	for i := loc.opener + 1; i < loc.closer; i++ {
		dir := p.getStrongTypeN0(i)
		if dir == ON {
			continue
		}
		if dir == dirEmbed {
			return dir // type matching embedding direction found
		}
		// remember that a strong type opposite to dirEmbed occurred
		dirOpposite = dir
	}
	// return ON if no strong type found, or class opposite to dirEmbed
	return dirOpposite
}
|
||||
|
||||
// classBeforePair determines which strong type precedes a Bracket Pair.
// It returns R or L if a strong type is found before the opening bracket,
// otherwise the start-of-sequence class sos (rule N0, step c.1).
func (p *bracketPairer) classBeforePair(loc bracketPair) Class {
	for i := loc.opener - 1; i >= 0; i-- {
		if dir := p.getStrongTypeN0(i); dir != ON {
			return dir
		}
	}
	// no strong types found, return sos
	return p.sos
}
|
||||
|
||||
// assignBracketType implements rule N0 for a single bracket pair: it decides
// the resolved direction of the pair from its contents and surroundings and
// writes that class onto both brackets (and trailing NSM runes).
func (p *bracketPairer) assignBracketType(loc bracketPair, dirEmbed Class, initialTypes []Class) {
	// rule "N0, a", inspect contents of pair
	dirPair := p.classifyPairContent(loc, dirEmbed)

	// dirPair is now L, R, or N (no strong type found)

	// the following logical tests are performed out of order compared to
	// the statement of the rules but yield the same results
	if dirPair == ON {
		return // case "d" - nothing to do
	}

	if dirPair != dirEmbed {
		// case "c": strong type found, opposite - check before (c.1)
		dirPair = p.classBeforePair(loc)
		if dirPair == dirEmbed || dirPair == ON {
			// no strong opposite type found before - use embedding (c.2)
			dirPair = dirEmbed
		}
	}
	// else: case "b", strong type found matching embedding,
	// no explicit action needed, as dirPair is already set to embedding
	// direction

	// set the bracket types to the type found
	p.setBracketsToType(loc, dirPair, initialTypes)
}
|
||||
|
||||
// setBracketsToType sets both brackets of loc, plus any run of nonspacing
// marks (NSM in the original types) immediately following either bracket,
// to class dirPair within the isolating run sequence.
//
// NOTE(review): the NSM propagation presumably mirrors UAX#9's treatment of
// marks following a resolved paired bracket — confirm against the spec.
func (p *bracketPairer) setBracketsToType(loc bracketPair, dirPair Class, initialTypes []Class) {
	p.codesIsolatedRun[loc.opener] = dirPair
	p.codesIsolatedRun[loc.closer] = dirPair

	// propagate dirPair to NSM runes directly after the opening bracket
	for i := loc.opener + 1; i < loc.closer; i++ {
		index := p.indexes[i]
		if initialTypes[index] != NSM {
			break
		}
		p.codesIsolatedRun[i] = dirPair
	}

	// propagate dirPair to NSM runes directly after the closing bracket
	for i := loc.closer + 1; i < len(p.indexes); i++ {
		index := p.indexes[i]
		if initialTypes[index] != NSM {
			break
		}
		p.codesIsolatedRun[i] = dirPair
	}
}
|
||||
|
||||
// resolveBrackets implements rule N0 for a list of pairs.
|
||||
func (p *bracketPairer) resolveBrackets(dirEmbed Class, initialTypes []Class) {
|
||||
for _, loc := range p.pairPositions {
|
||||
p.assignBracketType(loc, dirEmbed, initialTypes)
|
||||
}
|
||||
}
|
||||
1058
vendor/golang.org/x/text/unicode/bidi/core.go
generated
vendored
Normal file
1058
vendor/golang.org/x/text/unicode/bidi/core.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
133
vendor/golang.org/x/text/unicode/bidi/gen.go
generated
vendored
Normal file
133
vendor/golang.org/x/text/unicode/bidi/gen.go
generated
vendored
Normal file
@@ -0,0 +1,133 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"log"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
"golang.org/x/text/internal/triegen"
|
||||
"golang.org/x/text/internal/ucd"
|
||||
)
|
||||
|
||||
// outputFile is the destination of the generated tables ("tables.go" by default).
var outputFile = flag.String("out", "tables.go", "output file")
|
||||
|
||||
// main drives table generation: it repackages the shared gen_* sources into
// the bidi package (trieval.go, ranges_test.go) and then writes tables.go.
func main() {
	gen.Init()
	gen.Repackage("gen_trieval.go", "trieval.go", "bidi")
	gen.Repackage("gen_ranges.go", "ranges_test.go", "bidi")

	genTables()
}
|
||||
|
||||
// bidiClass names and codes taken from class "bc" in
// http://www.unicode.org/Public/8.0.0/ucd/PropertyValueAliases.txt
//
// The nine explicit directional formatting characters are all collapsed to
// the single Control class; the specific control is recovered elsewhere.
var bidiClass = map[string]Class{
	"AL":  AL,  // ArabicLetter
	"AN":  AN,  // ArabicNumber
	"B":   B,   // ParagraphSeparator
	"BN":  BN,  // BoundaryNeutral
	"CS":  CS,  // CommonSeparator
	"EN":  EN,  // EuropeanNumber
	"ES":  ES,  // EuropeanSeparator
	"ET":  ET,  // EuropeanTerminator
	"L":   L,   // LeftToRight
	"NSM": NSM, // NonspacingMark
	"ON":  ON,  // OtherNeutral
	"R":   R,   // RightToLeft
	"S":   S,   // SegmentSeparator
	"WS":  WS,  // WhiteSpace

	"FSI": Control,
	"PDF": Control,
	"PDI": Control,
	"LRE": Control,
	"LRI": Control,
	"LRO": Control,
	"RLE": Control,
	"RLI": Control,
	"RLO": Control,
}
|
||||
|
||||
// genTables builds the bidi rune trie and the bracket xor-mask table from the
// UCD files BidiBrackets.txt and extracted/DerivedBidiClass.txt, and writes
// them to *outputFile via the deferred WriteGoFile.
func genTables() {
	// the Class must fit in the low nibble of a trie entry
	if numClass > 0x0F {
		log.Fatalf("Too many Class constants (%#x > 0x0F).", numClass)
	}
	w := gen.NewCodeWriter()
	defer w.WriteGoFile(*outputFile, "bidi")

	gen.WriteUnicodeVersion(w)

	t := triegen.NewTrie("bidi")

	// Build data about bracket mapping. These bits need to be or-ed with
	// any other bits.
	orMask := map[rune]uint64{}

	xorMap := map[rune]int{}
	xorMasks := []rune{0} // First value is no-op.

	ucd.Parse(gen.OpenUCDFile("BidiBrackets.txt"), func(p *ucd.Parser) {
		r1 := p.Rune(0)
		r2 := p.Rune(1)
		// xor-ing a bracket with this mask yields its counterpart
		xor := r1 ^ r2
		if _, ok := xorMap[xor]; !ok {
			xorMap[xor] = len(xorMasks)
			xorMasks = append(xorMasks, xor)
		}
		entry := uint64(xorMap[xor]) << xorMaskShift
		switch p.String(2) {
		case "o":
			entry |= openMask
		case "c", "n":
		default:
			log.Fatalf("Unknown bracket class %q.", p.String(2))
		}
		orMask[r1] = entry
	})

	w.WriteComment(`
	xorMasks contains masks to be xor-ed with brackets to get the reverse
	version.`)
	w.WriteVar("xorMasks", xorMasks)

	// done guards against inserting a rune twice (explicit data wins over
	// the defaults added by visitDefaults).
	done := map[rune]bool{}

	insert := func(r rune, c Class) {
		if !done[r] {
			t.Insert(r, orMask[r]|uint64(c))
			done[r] = true
		}
	}

	// Insert the derived BiDi properties.
	ucd.Parse(gen.OpenUCDFile("extracted/DerivedBidiClass.txt"), func(p *ucd.Parser) {
		r := p.Rune(0)
		class, ok := bidiClass[p.String(1)]
		if !ok {
			log.Fatalf("%U: Unknown BiDi class %q", r, p.String(1))
		}
		insert(r, class)
	})
	visitDefaults(insert)

	// TODO: use sparse blocks. This would reduce table size considerably
	// from the looks of it.

	sz, err := t.Gen(w)
	if err != nil {
		log.Fatal(err)
	}
	w.Size += sz
}
|
||||
|
||||
// dummy values to make methods in gen_common compile. The real versions
// will be generated by this file to tables.go.
var (
	xorMasks []rune
)
|
||||
57
vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
generated
vendored
Normal file
57
vendor/golang.org/x/text/unicode/bidi/gen_ranges.go
generated
vendored
Normal file
@@ -0,0 +1,57 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
"golang.org/x/text/internal/ucd"
|
||||
"golang.org/x/text/unicode/rangetable"
|
||||
)
|
||||
|
||||
// These tables are hand-extracted from:
// http://www.unicode.org/Public/8.0.0/ucd/extracted/DerivedBidiClass.txt
//
// visitDefaults invokes fn with the default BiDi class for every rune that
// has one: the AL and R script blocks, ET for currency symbols, and BN for
// noncharacters and Default_Ignorable code points.
func visitDefaults(fn func(r rune, c Class)) {
	// first write default values for ranges listed above.
	visitRunes(fn, AL, []rune{
		0x0600, 0x07BF, // Arabic
		0x08A0, 0x08FF, // Arabic Extended-A
		0xFB50, 0xFDCF, // Arabic Presentation Forms
		0xFDF0, 0xFDFF,
		0xFE70, 0xFEFF,
		0x0001EE00, 0x0001EEFF, // Arabic Mathematical Alpha Symbols
	})
	visitRunes(fn, R, []rune{
		0x0590, 0x05FF, // Hebrew
		0x07C0, 0x089F, // Nko et al.
		0xFB1D, 0xFB4F,
		0x00010800, 0x00010FFF, // Cypriot Syllabary et. al.
		0x0001E800, 0x0001EDFF,
		0x0001EF00, 0x0001EFFF,
	})
	visitRunes(fn, ET, []rune{ // European Terminator
		0x20A0, 0x20Cf, // Currency symbols
	})
	rangetable.Visit(unicode.Noncharacter_Code_Point, func(r rune) {
		fn(r, BN) // Boundary Neutral
	})
	ucd.Parse(gen.OpenUCDFile("DerivedCoreProperties.txt"), func(p *ucd.Parser) {
		if p.String(1) == "Default_Ignorable_Code_Point" {
			fn(p.Rune(0), BN) // Boundary Neutral
		}
	})
}
|
||||
|
||||
func visitRunes(fn func(r rune, c Class), c Class, runes []rune) {
|
||||
for i := 0; i < len(runes); i += 2 {
|
||||
lo, hi := runes[i], runes[i+1]
|
||||
for j := lo; j <= hi; j++ {
|
||||
fn(j, c)
|
||||
}
|
||||
}
|
||||
}
|
||||
64
vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
generated
vendored
Normal file
64
vendor/golang.org/x/text/unicode/bidi/gen_trieval.go
generated
vendored
Normal file
@@ -0,0 +1,64 @@
|
||||
// Copyright 2015 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
package main
|
||||
|
||||
// Class is the Unicode BiDi class. Each rune has a single class.
type Class uint

const (
	L       Class = iota // LeftToRight
	R                    // RightToLeft
	EN                   // EuropeanNumber
	ES                   // EuropeanSeparator
	ET                   // EuropeanTerminator
	AN                   // ArabicNumber
	CS                   // CommonSeparator
	B                    // ParagraphSeparator
	S                    // SegmentSeparator
	WS                   // WhiteSpace
	ON                   // OtherNeutral
	BN                   // BoundaryNeutral
	NSM                  // NonspacingMark
	AL                   // ArabicLetter
	Control              // Control LRO - PDI

	// numClass counts the classes stored in the trie; the control classes
	// below share the single Control trie value.
	numClass

	LRO // LeftToRightOverride
	RLO // RightToLeftOverride
	LRE // LeftToRightEmbedding
	RLE // RightToLeftEmbedding
	PDF // PopDirectionalFormat
	LRI // LeftToRightIsolate
	RLI // RightToLeftIsolate
	FSI // FirstStrongIsolate
	PDI // PopDirectionalIsolate

	unknownClass = ^Class(0)
)
|
||||
|
||||
// controlToClass maps each explicit directional formatting character to its
// specific control class (these all share the Control value in the trie).
var controlToClass = map[rune]Class{
	0x202D: LRO, // LeftToRightOverride,
	0x202E: RLO, // RightToLeftOverride,
	0x202A: LRE, // LeftToRightEmbedding,
	0x202B: RLE, // RightToLeftEmbedding,
	0x202C: PDF, // PopDirectionalFormat,
	0x2066: LRI, // LeftToRightIsolate,
	0x2067: RLI, // RightToLeftIsolate,
	0x2068: FSI, // FirstStrongIsolate,
	0x2069: PDI, // PopDirectionalIsolate,
}
|
||||
|
||||
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4    1: Bracket open, 0: Bracket close
// 3..0 Class type

const (
	openMask     = 0x10 // bit 4: set for opening brackets
	xorMaskShift = 5    // bits 7..5: index into the xorMasks table
)
|
||||
206
vendor/golang.org/x/text/unicode/bidi/prop.go
generated
vendored
Normal file
206
vendor/golang.org/x/text/unicode/bidi/prop.go
generated
vendored
Normal file
@@ -0,0 +1,206 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package bidi
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// Properties provides access to BiDi properties of runes.
type Properties struct {
	entry uint8 // trie value: bits 7..5 bracket XOR-mask index, bit 4 open-bracket flag, bits 3..0 Class
	last  uint8 // last byte of the rune's UTF-8 encoding; Class() uses it to disambiguate control runes
}
|
||||
|
||||
// trie is the generated lookup structure mapping rune bytes to packed
// BiDi property entries.
var trie = newBidiTrie(0)
|
||||
|
||||
// TODO: using this for bidirule reduces the running time by about 5%. Consider
|
||||
// if this is worth exposing or if we can find a way to speed up the Class
|
||||
// method.
|
||||
//
|
||||
// // CompactClass is like Class, but maps all of the BiDi control classes
|
||||
// // (LRO, RLO, LRE, RLE, PDF, LRI, RLI, FSI, PDI) to the class Control.
|
||||
// func (p Properties) CompactClass() Class {
|
||||
// return Class(p.entry & 0x0F)
|
||||
// }
|
||||
|
||||
// Class returns the Bidi class for p.
//
// All explicit directional controls share the Control entry in the trie;
// the specific control class (LRO ... PDI) is recovered from the low nibble
// of the rune's last UTF-8 byte via controlByteToClass.
func (p Properties) Class() Class {
	c := Class(p.entry & 0x0F)
	if c == Control {
		c = controlByteToClass[p.last&0xF]
	}
	return c
}
|
||||
|
||||
// IsBracket reports whether the rune is a bracket: any of the bracket bits
// (open flag or XOR-mask index) in the high nibble of the entry is set.
func (p Properties) IsBracket() bool { return p.entry&0xF0 != 0 }
|
||||
|
||||
// IsOpeningBracket reports whether the rune is an opening bracket.
// IsBracket must return true.
func (p Properties) IsOpeningBracket() bool { return p.entry&openMask != 0 }
|
||||
|
||||
// TODO: find a better API and expose.
// reverseBracket returns the counterpart bracket of r by xor-ing it with the
// mask selected by the entry's high bits (see xorMasks).
func (p Properties) reverseBracket(r rune) rune {
	return xorMasks[p.entry>>xorMaskShift] ^ r
}
|
||||
|
||||
// controlByteToClass maps the low nibble of a control rune's last UTF-8 byte
// to its specific control class; used by Properties.Class.
var controlByteToClass = [16]Class{
	0xD: LRO, // U+202D LeftToRightOverride,
	0xE: RLO, // U+202E RightToLeftOverride,
	0xA: LRE, // U+202A LeftToRightEmbedding,
	0xB: RLE, // U+202B RightToLeftEmbedding,
	0xC: PDF, // U+202C PopDirectionalFormat,
	0x6: LRI, // U+2066 LeftToRightIsolate,
	0x7: RLI, // U+2067 RightToLeftIsolate,
	0x8: FSI, // U+2068 FirstStrongIsolate,
	0x9: PDI, // U+2069 PopDirectionalIsolate,
}
|
||||
|
||||
// LookupRune returns properties for r. It encodes r to UTF-8 into a stack
// buffer and delegates to Lookup.
func LookupRune(r rune) (p Properties, size int) {
	var buf [4]byte
	n := utf8.EncodeRune(buf[:], r)
	return Lookup(buf[:n])
}
|
||||
|
||||
// TODO: these lookup methods are based on the generated trie code. The returned
|
||||
// sizes have slightly different semantics from the generated code, in that it
|
||||
// always returns size==1 for an illegal UTF-8 byte (instead of the length
|
||||
// of the maximum invalid subsequence). Most Transformers, like unicode/norm,
|
||||
// leave invalid UTF-8 untouched, in which case it has performance benefits to
|
||||
// do so (without changing the semantics). Bidi requires the semantics used here
|
||||
// for the bidirule implementation to be compatible with the Go semantics.
|
||||
// They ultimately should perhaps be adopted by all trie implementations, for
|
||||
// convenience sake.
|
||||
// This unrolled code also boosts performance of the secure/bidirule package by
|
||||
// about 30%.
|
||||
// So, to remove this code:
|
||||
// - add option to trie generator to define return type.
|
||||
// - always return 1 byte size for ill-formed UTF-8 runes.
|
||||
|
||||
// Lookup returns properties for the first rune in s and the width in bytes of
// its encoding. The size will be 0 if s does not hold enough bytes to complete
// the encoding.
//
// The trie lookup is unrolled by UTF-8 length; any ill-formed byte yields an
// empty Properties with size 1 (see the compatibility notes above).
func Lookup(s []byte) (p Properties, sz int) {
	c0 := s[0]
	switch {
	case c0 < 0x80: // is ASCII
		return Properties{entry: bidiValues[c0]}, 1
	case c0 < 0xC2:
		// invalid lead byte (continuation byte or overlong encoding)
		return Properties{}, 1
	case c0 < 0xE0: // 2-byte UTF-8
		if len(s) < 2 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 { // not a continuation byte
			return Properties{}, 1
		}
		return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
	case c0 < 0xF0: // 3-byte UTF-8
		if len(s) < 3 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return Properties{}, 1
		}
		o := uint32(i)<<6 + uint32(c1)
		i = bidiIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return Properties{}, 1
		}
		// last is recorded only here: the BiDi control characters
		// (U+202x, U+206x) are 3-byte runes and Class() needs the final
		// byte to tell them apart.
		return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
	case c0 < 0xF8: // 4-byte UTF-8
		if len(s) < 4 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return Properties{}, 1
		}
		o := uint32(i)<<6 + uint32(c1)
		i = bidiIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return Properties{}, 1
		}
		o = uint32(i)<<6 + uint32(c2)
		i = bidiIndex[o]
		c3 := s[3]
		if c3 < 0x80 || 0xC0 <= c3 {
			return Properties{}, 1
		}
		return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
	}
	// Illegal rune
	return Properties{}, 1
}
|
||||
|
||||
// LookupString returns properties for the first rune in s and the width in
// bytes of its encoding. The size will be 0 if s does not hold enough bytes to
// complete the encoding.
//
// This is a byte-for-byte mirror of Lookup, duplicated to avoid a
// string<->[]byte conversion.
func LookupString(s string) (p Properties, sz int) {
	c0 := s[0]
	switch {
	case c0 < 0x80: // is ASCII
		return Properties{entry: bidiValues[c0]}, 1
	case c0 < 0xC2:
		// invalid lead byte (continuation byte or overlong encoding)
		return Properties{}, 1
	case c0 < 0xE0: // 2-byte UTF-8
		if len(s) < 2 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 { // not a continuation byte
			return Properties{}, 1
		}
		return Properties{entry: trie.lookupValue(uint32(i), c1)}, 2
	case c0 < 0xF0: // 3-byte UTF-8
		if len(s) < 3 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return Properties{}, 1
		}
		o := uint32(i)<<6 + uint32(c1)
		i = bidiIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return Properties{}, 1
		}
		// last is needed to distinguish the 3-byte BiDi control runes;
		// see the corresponding comment in Lookup.
		return Properties{entry: trie.lookupValue(uint32(i), c2), last: c2}, 3
	case c0 < 0xF8: // 4-byte UTF-8
		if len(s) < 4 {
			return Properties{}, 0
		}
		i := bidiIndex[c0]
		c1 := s[1]
		if c1 < 0x80 || 0xC0 <= c1 {
			return Properties{}, 1
		}
		o := uint32(i)<<6 + uint32(c1)
		i = bidiIndex[o]
		c2 := s[2]
		if c2 < 0x80 || 0xC0 <= c2 {
			return Properties{}, 1
		}
		o = uint32(i)<<6 + uint32(c2)
		i = bidiIndex[o]
		c3 := s[3]
		if c3 < 0x80 || 0xC0 <= c3 {
			return Properties{}, 1
		}
		return Properties{entry: trie.lookupValue(uint32(i), c3)}, 4
	}
	// Illegal rune
	return Properties{}, 1
}
|
||||
1779
vendor/golang.org/x/text/unicode/bidi/tables.go
generated
vendored
Normal file
1779
vendor/golang.org/x/text/unicode/bidi/tables.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
60
vendor/golang.org/x/text/unicode/bidi/trieval.go
generated
vendored
Normal file
60
vendor/golang.org/x/text/unicode/bidi/trieval.go
generated
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
// This file was generated by go generate; DO NOT EDIT
|
||||
|
||||
package bidi
|
||||
|
||||
// Class is the Unicode BiDi class. Each rune has a single class.
//
// NOTE(review): this file is generated from gen_trieval.go; keep edits there.
type Class uint

const (
	L       Class = iota // LeftToRight
	R                    // RightToLeft
	EN                   // EuropeanNumber
	ES                   // EuropeanSeparator
	ET                   // EuropeanTerminator
	AN                   // ArabicNumber
	CS                   // CommonSeparator
	B                    // ParagraphSeparator
	S                    // SegmentSeparator
	WS                   // WhiteSpace
	ON                   // OtherNeutral
	BN                   // BoundaryNeutral
	NSM                  // NonspacingMark
	AL                   // ArabicLetter
	Control              // Control LRO - PDI

	numClass

	LRO // LeftToRightOverride
	RLO // RightToLeftOverride
	LRE // LeftToRightEmbedding
	RLE // RightToLeftEmbedding
	PDF // PopDirectionalFormat
	LRI // LeftToRightIsolate
	RLI // RightToLeftIsolate
	FSI // FirstStrongIsolate
	PDI // PopDirectionalIsolate

	unknownClass = ^Class(0)
)
|
||||
|
||||
// controlToClass maps each explicit directional formatting character to its
// specific control class.
var controlToClass = map[rune]Class{
	0x202D: LRO, // LeftToRightOverride,
	0x202E: RLO, // RightToLeftOverride,
	0x202A: LRE, // LeftToRightEmbedding,
	0x202B: RLE, // RightToLeftEmbedding,
	0x202C: PDF, // PopDirectionalFormat,
	0x2066: LRI, // LeftToRightIsolate,
	0x2067: RLI, // RightToLeftIsolate,
	0x2068: FSI, // FirstStrongIsolate,
	0x2069: PDI, // PopDirectionalIsolate,
}
|
||||
|
||||
// A trie entry has the following bits:
// 7..5 XOR mask for brackets
// 4    1: Bracket open, 0: Bracket close
// 3..0 Class type

const (
	openMask     = 0x10 // bit 4: set for opening brackets
	xorMaskShift = 5    // bits 7..5: index into the xorMasks table
)
|
||||
100
vendor/golang.org/x/text/unicode/cldr/base.go
generated
vendored
Normal file
100
vendor/golang.org/x/text/unicode/cldr/base.go
generated
vendored
Normal file
@@ -0,0 +1,100 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"regexp"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
// Elem is implemented by every XML element.
|
||||
type Elem interface {
|
||||
setEnclosing(Elem)
|
||||
setName(string)
|
||||
enclosing() Elem
|
||||
|
||||
GetCommon() *Common
|
||||
}
|
||||
|
||||
type hidden struct {
|
||||
CharData string `xml:",chardata"`
|
||||
Alias *struct {
|
||||
Common
|
||||
Source string `xml:"source,attr"`
|
||||
Path string `xml:"path,attr"`
|
||||
} `xml:"alias"`
|
||||
Def *struct {
|
||||
Common
|
||||
Choice string `xml:"choice,attr,omitempty"`
|
||||
Type string `xml:"type,attr,omitempty"`
|
||||
} `xml:"default"`
|
||||
}
|
||||
|
||||
// Common holds several of the most common attributes and sub elements
|
||||
// of an XML element.
|
||||
type Common struct {
|
||||
XMLName xml.Name
|
||||
name string
|
||||
enclElem Elem
|
||||
Type string `xml:"type,attr,omitempty"`
|
||||
Reference string `xml:"reference,attr,omitempty"`
|
||||
Alt string `xml:"alt,attr,omitempty"`
|
||||
ValidSubLocales string `xml:"validSubLocales,attr,omitempty"`
|
||||
Draft string `xml:"draft,attr,omitempty"`
|
||||
hidden
|
||||
}
|
||||
|
||||
// Default returns the default type to select from the enclosed list
|
||||
// or "" if no default value is specified.
|
||||
func (e *Common) Default() string {
|
||||
if e.Def == nil {
|
||||
return ""
|
||||
}
|
||||
if e.Def.Choice != "" {
|
||||
return e.Def.Choice
|
||||
} else if e.Def.Type != "" {
|
||||
// Type is still used by the default element in collation.
|
||||
return e.Def.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// GetCommon returns e. It is provided such that Common implements Elem.
|
||||
func (e *Common) GetCommon() *Common {
|
||||
return e
|
||||
}
|
||||
|
||||
// Data returns the character data accumulated for this element.
|
||||
func (e *Common) Data() string {
|
||||
e.CharData = charRe.ReplaceAllStringFunc(e.CharData, replaceUnicode)
|
||||
return e.CharData
|
||||
}
|
||||
|
||||
func (e *Common) setName(s string) {
|
||||
e.name = s
|
||||
}
|
||||
|
||||
func (e *Common) enclosing() Elem {
|
||||
return e.enclElem
|
||||
}
|
||||
|
||||
func (e *Common) setEnclosing(en Elem) {
|
||||
e.enclElem = en
|
||||
}
|
||||
|
||||
// Escape characters that can be escaped without further escaping the string.
|
||||
var charRe = regexp.MustCompile(`&#x[0-9a-fA-F]*;|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|\\x[0-9a-fA-F]{2}|\\[0-7]{3}|\\[abtnvfr]`)
|
||||
|
||||
// replaceUnicode converts a single escaped codepoint notation — either an
// XML numeric character reference (&#xHHHH;) or a Go-style backslash escape
// (\uHHHH, \UHHHHHHHH, \xHH, \ooo, \[abtnvfr]) — into a one-rune string.
// The input is assumed to be correctly formatted.
func replaceUnicode(s string) string {
	if s[1] == '#' {
		// XML entity form: strip "&#x" and the trailing ";", parse as hex.
		n, _ := strconv.ParseInt(s[3:len(s)-1], 16, 32)
		return string(rune(n))
	}
	// Backslash-escape form: let strconv decode it.
	r, _, _, _ := strconv.UnquoteChar(s, 0)
	return string(r)
}
|
||||
130
vendor/golang.org/x/text/unicode/cldr/cldr.go
generated
vendored
Normal file
130
vendor/golang.org/x/text/unicode/cldr/cldr.go
generated
vendored
Normal file
@@ -0,0 +1,130 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run makexml.go -output xml.go
|
||||
|
||||
// Package cldr provides a parser for LDML and related XML formats.
|
||||
// This package is intended to be used by the table generation tools
|
||||
// for the various internationalization-related packages.
|
||||
// As the XML types are generated from the CLDR DTD, and as the CLDR standard
|
||||
// is periodically amended, this package may change considerably over time.
|
||||
// This mostly means that data may appear and disappear between versions.
|
||||
// That is, old code should keep compiling for newer versions, but data
|
||||
// may have moved or changed.
|
||||
// CLDR version 22 is the first version supported by this package.
|
||||
// Older versions may not work.
|
||||
package cldr // import "golang.org/x/text/unicode/cldr"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// CLDR provides access to parsed data of the Unicode Common Locale Data Repository.
|
||||
type CLDR struct {
|
||||
parent map[string][]string
|
||||
locale map[string]*LDML
|
||||
resolved map[string]*LDML
|
||||
bcp47 *LDMLBCP47
|
||||
supp *SupplementalData
|
||||
}
|
||||
|
||||
func makeCLDR() *CLDR {
|
||||
return &CLDR{
|
||||
parent: make(map[string][]string),
|
||||
locale: make(map[string]*LDML),
|
||||
resolved: make(map[string]*LDML),
|
||||
bcp47: &LDMLBCP47{},
|
||||
supp: &SupplementalData{},
|
||||
}
|
||||
}
|
||||
|
||||
// BCP47 returns the parsed BCP47 LDML data. If no such data was parsed, nil is returned.
|
||||
func (cldr *CLDR) BCP47() *LDMLBCP47 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Draft indicates the draft level of an element.
type Draft int

const (
	Approved Draft = iota
	Contributed
	Provisional
	Unconfirmed
)

// drafts lists the level names from least to most approved; the trailing
// empty string makes Approved render as "" in String.
var drafts = []string{"unconfirmed", "provisional", "contributed", "approved", ""}

// ParseDraft returns the Draft value corresponding to the given string. The
// empty string corresponds to Approved.
func ParseDraft(level string) (Draft, error) {
	if level == "" {
		return Approved, nil
	}
	for i := 0; i < len(drafts); i++ {
		if drafts[i] == level {
			return Unconfirmed - Draft(i), nil
		}
	}
	return Approved, fmt.Errorf("cldr: unknown draft level %q", level)
}

// String renders the draft level; Approved maps to the empty string.
// NOTE(review): for intermediate levels this index looks shifted by one
// relative to ParseDraft — confirm against upstream before relying on it.
func (d Draft) String() string {
	idx := len(drafts) - 1 - int(d)
	return drafts[idx]
}
|
||||
|
||||
// SetDraftLevel sets which draft levels to include in the evaluated LDML.
|
||||
// Any draft element for which the draft level is higher than lev will be excluded.
|
||||
// If multiple draft levels are available for a single element, the one with the
|
||||
// lowest draft level will be selected, unless preferDraft is true, in which case
|
||||
// the highest draft will be chosen.
|
||||
// It is assumed that the underlying LDML is canonicalized.
|
||||
func (cldr *CLDR) SetDraftLevel(lev Draft, preferDraft bool) {
|
||||
// TODO: implement
|
||||
cldr.resolved = make(map[string]*LDML)
|
||||
}
|
||||
|
||||
// RawLDML returns the LDML XML for id in unresolved form.
|
||||
// id must be one of the strings returned by Locales.
|
||||
func (cldr *CLDR) RawLDML(loc string) *LDML {
|
||||
return cldr.locale[loc]
|
||||
}
|
||||
|
||||
// LDML returns the fully resolved LDML XML for loc, which must be one of
|
||||
// the strings returned by Locales.
|
||||
func (cldr *CLDR) LDML(loc string) (*LDML, error) {
|
||||
return cldr.resolve(loc)
|
||||
}
|
||||
|
||||
// Supplemental returns the parsed supplemental data. If no such data was parsed,
|
||||
// nil is returned.
|
||||
func (cldr *CLDR) Supplemental() *SupplementalData {
|
||||
return cldr.supp
|
||||
}
|
||||
|
||||
// Locales returns the locales for which there exist files.
|
||||
// Valid sublocales for which there is no file are not included.
|
||||
// The root locale is always sorted first.
|
||||
func (cldr *CLDR) Locales() []string {
|
||||
loc := []string{"root"}
|
||||
hasRoot := false
|
||||
for l, _ := range cldr.locale {
|
||||
if l == "root" {
|
||||
hasRoot = true
|
||||
continue
|
||||
}
|
||||
loc = append(loc, l)
|
||||
}
|
||||
sort.Strings(loc[1:])
|
||||
if !hasRoot {
|
||||
return loc[1:]
|
||||
}
|
||||
return loc
|
||||
}
|
||||
|
||||
// Get fills in the fields of x based on the XPath path.
|
||||
func Get(e Elem, path string) (res Elem, err error) {
|
||||
return walkXPath(e, path)
|
||||
}
|
||||
359
vendor/golang.org/x/text/unicode/cldr/collate.go
generated
vendored
Normal file
359
vendor/golang.org/x/text/unicode/cldr/collate.go
generated
vendored
Normal file
@@ -0,0 +1,359 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"encoding/xml"
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// RuleProcessor can be passed to Collator's Process method, which
|
||||
// parses the rules and calls the respective method for each rule found.
|
||||
type RuleProcessor interface {
|
||||
Reset(anchor string, before int) error
|
||||
Insert(level int, str, context, extend string) error
|
||||
Index(id string)
|
||||
}
|
||||
|
||||
const (
|
||||
// cldrIndex is a Unicode-reserved sentinel value used to mark the start
|
||||
// of a grouping within an index.
|
||||
// We ignore any rule that starts with this rune.
|
||||
// See http://unicode.org/reports/tr35/#Collation_Elements for details.
|
||||
cldrIndex = "\uFDD0"
|
||||
|
||||
// specialAnchor is the format in which to represent logical reset positions,
|
||||
// such as "first tertiary ignorable".
|
||||
specialAnchor = "<%s/>"
|
||||
)
|
||||
|
||||
// Process parses the rules for the tailorings of this collation
// and calls the respective methods of p for each rule found.
func (c Collation) Process(p RuleProcessor) (err error) {
	if len(c.Cr) > 0 {
		// CLDR 25+ stores rules as plain text inside <cr> elements.
		if len(c.Cr) > 1 {
			return fmt.Errorf("multiple cr elements, want 0 or 1")
		}
		return processRules(p, c.Cr[0].Data())
	}
	if c.Rules.Any != nil {
		// Fall back to the XML rule format of CLDR 24 and older.
		return c.processXML(p)
	}
	return errors.New("no tailoring data")
}
|
||||
|
||||
// processRules parses rules in the Collation Rule Syntax defined in
|
||||
// http://www.unicode.org/reports/tr35/tr35-collation.html#Collation_Tailorings.
|
||||
func processRules(p RuleProcessor, s string) (err error) {
|
||||
chk := func(s string, e error) string {
|
||||
if err == nil {
|
||||
err = e
|
||||
}
|
||||
return s
|
||||
}
|
||||
i := 0 // Save the line number for use after the loop.
|
||||
scanner := bufio.NewScanner(strings.NewReader(s))
|
||||
for ; scanner.Scan() && err == nil; i++ {
|
||||
for s := skipSpace(scanner.Text()); s != "" && s[0] != '#'; s = skipSpace(s) {
|
||||
level := 5
|
||||
var ch byte
|
||||
switch ch, s = s[0], s[1:]; ch {
|
||||
case '&': // followed by <anchor> or '[' <key> ']'
|
||||
if s = skipSpace(s); consume(&s, '[') {
|
||||
s = chk(parseSpecialAnchor(p, s))
|
||||
} else {
|
||||
s = chk(parseAnchor(p, 0, s))
|
||||
}
|
||||
case '<': // sort relation '<'{1,4}, optionally followed by '*'.
|
||||
for level = 1; consume(&s, '<'); level++ {
|
||||
}
|
||||
if level > 4 {
|
||||
err = fmt.Errorf("level %d > 4", level)
|
||||
}
|
||||
fallthrough
|
||||
case '=': // identity relation, optionally followed by *.
|
||||
if consume(&s, '*') {
|
||||
s = chk(parseSequence(p, level, s))
|
||||
} else {
|
||||
s = chk(parseOrder(p, level, s))
|
||||
}
|
||||
default:
|
||||
chk("", fmt.Errorf("illegal operator %q", ch))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if chk("", scanner.Err()); err != nil {
|
||||
return fmt.Errorf("%d: %v", i, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// parseSpecialAnchor parses the anchor syntax which is either of the form
|
||||
// ['before' <level>] <anchor>
|
||||
// or
|
||||
// [<label>]
|
||||
// The starting should already be consumed.
|
||||
func parseSpecialAnchor(p RuleProcessor, s string) (tail string, err error) {
|
||||
i := strings.IndexByte(s, ']')
|
||||
if i == -1 {
|
||||
return "", errors.New("unmatched bracket")
|
||||
}
|
||||
a := strings.TrimSpace(s[:i])
|
||||
s = s[i+1:]
|
||||
if strings.HasPrefix(a, "before ") {
|
||||
l, err := strconv.ParseUint(skipSpace(a[len("before "):]), 10, 3)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
return parseAnchor(p, int(l), s)
|
||||
}
|
||||
return s, p.Reset(fmt.Sprintf(specialAnchor, a), 0)
|
||||
}
|
||||
|
||||
func parseAnchor(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
anchor, s, err := scanString(s)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
return s, p.Reset(anchor, level)
|
||||
}
|
||||
|
||||
func parseOrder(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
var value, context, extend string
|
||||
if value, s, err = scanString(s); err != nil {
|
||||
return s, err
|
||||
}
|
||||
if strings.HasPrefix(value, cldrIndex) {
|
||||
p.Index(value[len(cldrIndex):])
|
||||
return
|
||||
}
|
||||
if consume(&s, '|') {
|
||||
if context, s, err = scanString(s); err != nil {
|
||||
return s, errors.New("missing string after context")
|
||||
}
|
||||
}
|
||||
if consume(&s, '/') {
|
||||
if extend, s, err = scanString(s); err != nil {
|
||||
return s, errors.New("missing string after extension")
|
||||
}
|
||||
}
|
||||
return s, p.Insert(level, value, context, extend)
|
||||
}
|
||||
|
||||
// scanString scans a single input string.
|
||||
func scanString(s string) (str, tail string, err error) {
|
||||
if s = skipSpace(s); s == "" {
|
||||
return s, s, errors.New("missing string")
|
||||
}
|
||||
buf := [16]byte{} // small but enough to hold most cases.
|
||||
value := buf[:0]
|
||||
for s != "" {
|
||||
if consume(&s, '\'') {
|
||||
i := strings.IndexByte(s, '\'')
|
||||
if i == -1 {
|
||||
return "", "", errors.New(`unmatched single quote`)
|
||||
}
|
||||
if i == 0 {
|
||||
value = append(value, '\'')
|
||||
} else {
|
||||
value = append(value, s[:i]...)
|
||||
}
|
||||
s = s[i+1:]
|
||||
continue
|
||||
}
|
||||
r, sz := utf8.DecodeRuneInString(s)
|
||||
if unicode.IsSpace(r) || strings.ContainsRune("&<=#", r) {
|
||||
break
|
||||
}
|
||||
value = append(value, s[:sz]...)
|
||||
s = s[sz:]
|
||||
}
|
||||
return string(value), skipSpace(s), nil
|
||||
}
|
||||
|
||||
func parseSequence(p RuleProcessor, level int, s string) (tail string, err error) {
|
||||
if s = skipSpace(s); s == "" {
|
||||
return s, errors.New("empty sequence")
|
||||
}
|
||||
last := rune(0)
|
||||
for s != "" {
|
||||
r, sz := utf8.DecodeRuneInString(s)
|
||||
s = s[sz:]
|
||||
|
||||
if r == '-' {
|
||||
// We have a range. The first element was already written.
|
||||
if last == 0 {
|
||||
return s, errors.New("range without starter value")
|
||||
}
|
||||
r, sz = utf8.DecodeRuneInString(s)
|
||||
s = s[sz:]
|
||||
if r == utf8.RuneError || r < last {
|
||||
return s, fmt.Errorf("invalid range %q-%q", last, r)
|
||||
}
|
||||
for i := last + 1; i <= r; i++ {
|
||||
if err := p.Insert(level, string(i), "", ""); err != nil {
|
||||
return s, err
|
||||
}
|
||||
}
|
||||
last = 0
|
||||
continue
|
||||
}
|
||||
|
||||
if unicode.IsSpace(r) || unicode.IsPunct(r) {
|
||||
break
|
||||
}
|
||||
|
||||
// normal case
|
||||
if err := p.Insert(level, string(r), "", ""); err != nil {
|
||||
return s, err
|
||||
}
|
||||
last = r
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// skipSpace returns s with any leading Unicode whitespace removed.
func skipSpace(s string) string {
	for s != "" {
		r, sz := utf8.DecodeRuneInString(s)
		if !unicode.IsSpace(r) {
			break
		}
		s = s[sz:]
	}
	return s
}
|
||||
|
||||
// consume reports whether the next byte of *s is ch and, if so, advances
// *s past it.
func consume(s *string, ch byte) (ok bool) {
	str := *s
	if len(str) == 0 || str[0] != ch {
		return false
	}
	*s = str[1:]
	return true
}
|
||||
|
||||
// The following code parses Collation rules of CLDR version 24 and before.
|
||||
|
||||
var lmap = map[byte]int{
|
||||
'p': 1,
|
||||
's': 2,
|
||||
't': 3,
|
||||
'i': 5,
|
||||
}
|
||||
|
||||
type rulesElem struct {
|
||||
Rules struct {
|
||||
Common
|
||||
Any []*struct {
|
||||
XMLName xml.Name
|
||||
rule
|
||||
} `xml:",any"`
|
||||
} `xml:"rules"`
|
||||
}
|
||||
|
||||
type rule struct {
|
||||
Value string `xml:",chardata"`
|
||||
Before string `xml:"before,attr"`
|
||||
Any []*struct {
|
||||
XMLName xml.Name
|
||||
rule
|
||||
} `xml:",any"`
|
||||
}
|
||||
|
||||
var emptyValueError = errors.New("cldr: empty rule value")
|
||||
|
||||
func (r *rule) value() (string, error) {
|
||||
// Convert hexadecimal Unicode codepoint notation to a string.
|
||||
s := charRe.ReplaceAllStringFunc(r.Value, replaceUnicode)
|
||||
r.Value = s
|
||||
if s == "" {
|
||||
if len(r.Any) != 1 {
|
||||
return "", emptyValueError
|
||||
}
|
||||
r.Value = fmt.Sprintf(specialAnchor, r.Any[0].XMLName.Local)
|
||||
r.Any = nil
|
||||
} else if len(r.Any) != 0 {
|
||||
return "", fmt.Errorf("cldr: XML elements found in collation rule: %v", r.Any)
|
||||
}
|
||||
return r.Value, nil
|
||||
}
|
||||
|
||||
func (r rule) process(p RuleProcessor, name, context, extend string) error {
|
||||
v, err := r.value()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
switch name {
|
||||
case "p", "s", "t", "i":
|
||||
if strings.HasPrefix(v, cldrIndex) {
|
||||
p.Index(v[len(cldrIndex):])
|
||||
return nil
|
||||
}
|
||||
if err := p.Insert(lmap[name[0]], v, context, extend); err != nil {
|
||||
return err
|
||||
}
|
||||
case "pc", "sc", "tc", "ic":
|
||||
level := lmap[name[0]]
|
||||
for _, s := range v {
|
||||
if err := p.Insert(level, string(s), context, extend); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return fmt.Errorf("cldr: unsupported tag: %q", name)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// processXML parses the format of CLDR versions 24 and older.
|
||||
func (c Collation) processXML(p RuleProcessor) (err error) {
|
||||
// Collation is generated and defined in xml.go.
|
||||
var v string
|
||||
for _, r := range c.Rules.Any {
|
||||
switch r.XMLName.Local {
|
||||
case "reset":
|
||||
level := 0
|
||||
switch r.Before {
|
||||
case "primary", "1":
|
||||
level = 1
|
||||
case "secondary", "2":
|
||||
level = 2
|
||||
case "tertiary", "3":
|
||||
level = 3
|
||||
case "":
|
||||
default:
|
||||
return fmt.Errorf("cldr: unknown level %q", r.Before)
|
||||
}
|
||||
v, err = r.value()
|
||||
if err == nil {
|
||||
err = p.Reset(v, level)
|
||||
}
|
||||
case "x":
|
||||
var context, extend string
|
||||
for _, r1 := range r.Any {
|
||||
v, err = r1.value()
|
||||
switch r1.XMLName.Local {
|
||||
case "context":
|
||||
context = v
|
||||
case "extend":
|
||||
extend = v
|
||||
}
|
||||
}
|
||||
for _, r1 := range r.Any {
|
||||
if t := r1.XMLName.Local; t == "context" || t == "extend" {
|
||||
continue
|
||||
}
|
||||
r1.rule.process(p, r1.XMLName.Local, context, extend)
|
||||
}
|
||||
default:
|
||||
err = r.rule.process(p, r.XMLName.Local, "", "")
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
171
vendor/golang.org/x/text/unicode/cldr/decode.go
generated
vendored
Normal file
171
vendor/golang.org/x/text/unicode/cldr/decode.go
generated
vendored
Normal file
@@ -0,0 +1,171 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
)
|
||||
|
||||
// A Decoder loads an archive of CLDR data.
|
||||
type Decoder struct {
|
||||
dirFilter []string
|
||||
sectionFilter []string
|
||||
loader Loader
|
||||
cldr *CLDR
|
||||
curLocale string
|
||||
}
|
||||
|
||||
// SetSectionFilter takes a list top-level LDML element names to which
|
||||
// evaluation of LDML should be limited. It automatically calls SetDirFilter.
|
||||
func (d *Decoder) SetSectionFilter(filter ...string) {
|
||||
d.sectionFilter = filter
|
||||
// TODO: automatically set dir filter
|
||||
}
|
||||
|
||||
// SetDirFilter limits the loading of LDML XML files to the specified directories.
// Note that sections may be split across directories differently for different CLDR versions.
// For more robust code, use SetSectionFilter.
func (d *Decoder) SetDirFilter(dir ...string) {
	d.dirFilter = dir
}
|
||||
|
||||
// A Loader provides access to the files of a CLDR archive.
|
||||
type Loader interface {
|
||||
Len() int
|
||||
Path(i int) string
|
||||
Reader(i int) (io.ReadCloser, error)
|
||||
}
|
||||
|
||||
var fileRe = regexp.MustCompile(".*/(.*)/(.*)\\.xml")
|
||||
|
||||
// Decode loads and decodes the files represented by l.
|
||||
func (d *Decoder) Decode(l Loader) (cldr *CLDR, err error) {
|
||||
d.cldr = makeCLDR()
|
||||
for i := 0; i < l.Len(); i++ {
|
||||
fname := l.Path(i)
|
||||
if m := fileRe.FindStringSubmatch(fname); m != nil {
|
||||
if len(d.dirFilter) > 0 && !in(d.dirFilter, m[1]) {
|
||||
continue
|
||||
}
|
||||
var r io.Reader
|
||||
if r, err = l.Reader(i); err == nil {
|
||||
err = d.decode(m[1], m[2], r)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
d.cldr.finalize(d.sectionFilter)
|
||||
return d.cldr, nil
|
||||
}
|
||||
|
||||
func (d *Decoder) decode(dir, id string, r io.Reader) error {
|
||||
var v interface{}
|
||||
var l *LDML
|
||||
cldr := d.cldr
|
||||
switch {
|
||||
case dir == "supplemental":
|
||||
v = cldr.supp
|
||||
case dir == "transforms":
|
||||
return nil
|
||||
case dir == "bcp47":
|
||||
v = cldr.bcp47
|
||||
case dir == "validity":
|
||||
return nil
|
||||
default:
|
||||
ok := false
|
||||
if v, ok = cldr.locale[id]; !ok {
|
||||
l = &LDML{}
|
||||
v, cldr.locale[id] = l, l
|
||||
}
|
||||
}
|
||||
x := xml.NewDecoder(r)
|
||||
if err := x.Decode(v); err != nil {
|
||||
log.Printf("%s/%s: %v", dir, id, err)
|
||||
return err
|
||||
}
|
||||
if l != nil {
|
||||
if l.Identity == nil {
|
||||
return fmt.Errorf("%s/%s: missing identity element", dir, id)
|
||||
}
|
||||
// TODO: verify when CLDR bug http://unicode.org/cldr/trac/ticket/8970
|
||||
// is resolved.
|
||||
// path := strings.Split(id, "_")
|
||||
// if lang := l.Identity.Language.Type; lang != path[0] {
|
||||
// return fmt.Errorf("%s/%s: language was %s; want %s", dir, id, lang, path[0])
|
||||
// }
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type pathLoader []string
|
||||
|
||||
func makePathLoader(path string) (pl pathLoader, err error) {
|
||||
err = filepath.Walk(path, func(path string, _ os.FileInfo, err error) error {
|
||||
pl = append(pl, path)
|
||||
return err
|
||||
})
|
||||
return pl, err
|
||||
}
|
||||
|
||||
func (pl pathLoader) Len() int {
|
||||
return len(pl)
|
||||
}
|
||||
|
||||
func (pl pathLoader) Path(i int) string {
|
||||
return pl[i]
|
||||
}
|
||||
|
||||
func (pl pathLoader) Reader(i int) (io.ReadCloser, error) {
|
||||
return os.Open(pl[i])
|
||||
}
|
||||
|
||||
// DecodePath loads CLDR data from the given path.
|
||||
func (d *Decoder) DecodePath(path string) (cldr *CLDR, err error) {
|
||||
loader, err := makePathLoader(path)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return d.Decode(loader)
|
||||
}
|
||||
|
||||
type zipLoader struct {
|
||||
r *zip.Reader
|
||||
}
|
||||
|
||||
func (zl zipLoader) Len() int {
|
||||
return len(zl.r.File)
|
||||
}
|
||||
|
||||
func (zl zipLoader) Path(i int) string {
|
||||
return zl.r.File[i].Name
|
||||
}
|
||||
|
||||
func (zl zipLoader) Reader(i int) (io.ReadCloser, error) {
|
||||
return zl.r.File[i].Open()
|
||||
}
|
||||
|
||||
// DecodeZip loads CLDR data from the zip archive for which r is the source.
func (d *Decoder) DecodeZip(r io.Reader) (cldr *CLDR, err error) {
	// zip.NewReader needs random access, so buffer the whole archive first.
	buffer, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}
	archive, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
	if err != nil {
		return nil, err
	}
	return d.Decode(zipLoader{archive})
}
|
||||
400
vendor/golang.org/x/text/unicode/cldr/makexml.go
generated
vendored
Normal file
400
vendor/golang.org/x/text/unicode/cldr/makexml.go
generated
vendored
Normal file
@@ -0,0 +1,400 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// +build ignore
|
||||
|
||||
// This tool generates types for the various XML formats of CLDR.
|
||||
package main
|
||||
|
||||
import (
|
||||
"archive/zip"
|
||||
"bytes"
|
||||
"encoding/xml"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/text/internal/gen"
|
||||
)
|
||||
|
||||
var outputFile = flag.String("output", "xml.go", "output file name")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
r := gen.OpenCLDRCoreZip()
|
||||
buffer, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
log.Fatal("Could not read zip file")
|
||||
}
|
||||
r.Close()
|
||||
z, err := zip.NewReader(bytes.NewReader(buffer), int64(len(buffer)))
|
||||
if err != nil {
|
||||
log.Fatalf("Could not read zip archive: %v", err)
|
||||
}
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
version := gen.CLDRVersion()
|
||||
|
||||
for _, dtd := range files {
|
||||
for _, f := range z.File {
|
||||
if strings.HasSuffix(f.Name, dtd.file+".dtd") {
|
||||
r, err := f.Open()
|
||||
failOnError(err)
|
||||
|
||||
b := makeBuilder(&buf, dtd)
|
||||
b.parseDTD(r)
|
||||
b.resolve(b.index[dtd.top[0]])
|
||||
b.write()
|
||||
if b.version != "" && version != b.version {
|
||||
println(f.Name)
|
||||
log.Fatalf("main: inconsistent versions: found %s; want %s", b.version, version)
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
fmt.Fprintln(&buf, "// Version is the version of CLDR from which the XML definitions are generated.")
|
||||
fmt.Fprintf(&buf, "const Version = %q\n", version)
|
||||
|
||||
gen.WriteGoFile(*outputFile, "cldr", buf.Bytes())
|
||||
}
|
||||
|
||||
func failOnError(err error) {
|
||||
if err != nil {
|
||||
log.New(os.Stderr, "", log.Lshortfile).Output(2, err.Error())
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// configuration data per DTD type
|
||||
type dtd struct {
|
||||
file string // base file name
|
||||
root string // Go name of the root XML element
|
||||
top []string // create a different type for this section
|
||||
|
||||
skipElem []string // hard-coded or deprecated elements
|
||||
skipAttr []string // attributes to exclude
|
||||
predefined []string // hard-coded elements exist of the form <name>Elem
|
||||
forceRepeat []string // elements to make slices despite DTD
|
||||
}
|
||||
|
||||
var files = []dtd{
|
||||
{
|
||||
file: "ldmlBCP47",
|
||||
root: "LDMLBCP47",
|
||||
top: []string{"ldmlBCP47"},
|
||||
skipElem: []string{
|
||||
"cldrVersion", // deprecated, not used
|
||||
},
|
||||
},
|
||||
{
|
||||
file: "ldmlSupplemental",
|
||||
root: "SupplementalData",
|
||||
top: []string{"supplementalData"},
|
||||
skipElem: []string{
|
||||
"cldrVersion", // deprecated, not used
|
||||
},
|
||||
forceRepeat: []string{
|
||||
"plurals", // data defined in plurals.xml and ordinals.xml
|
||||
},
|
||||
},
|
||||
{
|
||||
file: "ldml",
|
||||
root: "LDML",
|
||||
top: []string{
|
||||
"ldml", "collation", "calendar", "timeZoneNames", "localeDisplayNames", "numbers",
|
||||
},
|
||||
skipElem: []string{
|
||||
"cp", // not used anywhere
|
||||
"special", // not used anywhere
|
||||
"fallback", // deprecated, not used
|
||||
"alias", // in Common
|
||||
"default", // in Common
|
||||
},
|
||||
skipAttr: []string{
|
||||
"hiraganaQuarternary", // typo in DTD, correct version included as well
|
||||
},
|
||||
predefined: []string{"rules"},
|
||||
},
|
||||
}
|
||||
|
||||
var comments = map[string]string{
|
||||
"ldmlBCP47": `
|
||||
// LDMLBCP47 holds information on allowable values for various variables in LDML.
|
||||
`,
|
||||
"supplementalData": `
|
||||
// SupplementalData holds information relevant for internationalization
|
||||
// and proper use of CLDR, but that is not contained in the locale hierarchy.
|
||||
`,
|
||||
"ldml": `
|
||||
// LDML is the top-level type for locale-specific data.
|
||||
`,
|
||||
"collation": `
|
||||
// Collation contains rules that specify a certain sort-order,
|
||||
// as a tailoring of the root order.
|
||||
// The parsed rules are obtained by passing a RuleProcessor to Collation's
|
||||
// Process method.
|
||||
`,
|
||||
"calendar": `
|
||||
// Calendar specifies the fields used for formatting and parsing dates and times.
|
||||
// The month and quarter names are identified numerically, starting at 1.
|
||||
// The day (of the week) names are identified with short strings, since there is
|
||||
// no universally-accepted numeric designation.
|
||||
`,
|
||||
"dates": `
|
||||
// Dates contains information regarding the format and parsing of dates and times.
|
||||
`,
|
||||
"localeDisplayNames": `
|
||||
// LocaleDisplayNames specifies localized display names for for scripts, languages,
|
||||
// countries, currencies, and variants.
|
||||
`,
|
||||
"numbers": `
|
||||
// Numbers supplies information for formatting and parsing numbers and currencies.
|
||||
`,
|
||||
}
|
||||
|
||||
type element struct {
|
||||
name string // XML element name
|
||||
category string // elements contained by this element
|
||||
signature string // category + attrKey*
|
||||
|
||||
attr []*attribute // attributes supported by this element.
|
||||
sub []struct { // parsed and evaluated sub elements of this element.
|
||||
e *element
|
||||
repeat bool // true if the element needs to be a slice
|
||||
}
|
||||
|
||||
resolved bool // prevent multiple resolutions of this element.
|
||||
}
|
||||
|
||||
type attribute struct {
|
||||
name string
|
||||
key string
|
||||
list []string
|
||||
|
||||
tag string // Go tag
|
||||
}
|
||||
|
||||
var (
|
||||
reHead = regexp.MustCompile(` *(\w+) +([\w\-]+)`)
|
||||
reAttr = regexp.MustCompile(` *(\w+) *(?:(\w+)|\(([\w\- \|]+)\)) *(?:#([A-Z]*) *(?:\"([\.\d+])\")?)? *("[\w\-:]*")?`)
|
||||
reElem = regexp.MustCompile(`^ *(EMPTY|ANY|\(.*\)[\*\+\?]?) *$`)
|
||||
reToken = regexp.MustCompile(`\w\-`)
|
||||
)
|
||||
|
||||
// builder is used to read in the DTD files from CLDR and generate Go code
|
||||
// to be used with the encoding/xml package.
|
||||
type builder struct {
|
||||
w io.Writer
|
||||
index map[string]*element
|
||||
elem []*element
|
||||
info dtd
|
||||
version string
|
||||
}
|
||||
|
||||
func makeBuilder(w io.Writer, d dtd) builder {
|
||||
return builder{
|
||||
w: w,
|
||||
index: make(map[string]*element),
|
||||
elem: []*element{},
|
||||
info: d,
|
||||
}
|
||||
}
|
||||
|
||||
// parseDTD parses a DTD file.
|
||||
func (b *builder) parseDTD(r io.Reader) {
|
||||
for d := xml.NewDecoder(r); ; {
|
||||
t, err := d.Token()
|
||||
if t == nil {
|
||||
break
|
||||
}
|
||||
failOnError(err)
|
||||
dir, ok := t.(xml.Directive)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
m := reHead.FindSubmatch(dir)
|
||||
dir = dir[len(m[0]):]
|
||||
ename := string(m[2])
|
||||
el, elementFound := b.index[ename]
|
||||
switch string(m[1]) {
|
||||
case "ELEMENT":
|
||||
if elementFound {
|
||||
log.Fatal("parseDTD: duplicate entry for element %q", ename)
|
||||
}
|
||||
m := reElem.FindSubmatch(dir)
|
||||
if m == nil {
|
||||
log.Fatalf("parseDTD: invalid element %q", string(dir))
|
||||
}
|
||||
if len(m[0]) != len(dir) {
|
||||
log.Fatal("parseDTD: invalid element %q", string(dir), len(dir), len(m[0]), string(m[0]))
|
||||
}
|
||||
s := string(m[1])
|
||||
el = &element{
|
||||
name: ename,
|
||||
category: s,
|
||||
}
|
||||
b.index[ename] = el
|
||||
case "ATTLIST":
|
||||
if !elementFound {
|
||||
log.Fatalf("parseDTD: unknown element %q", ename)
|
||||
}
|
||||
s := string(dir)
|
||||
m := reAttr.FindStringSubmatch(s)
|
||||
if m == nil {
|
||||
log.Fatal(fmt.Errorf("parseDTD: invalid attribute %q", string(dir)))
|
||||
}
|
||||
if m[4] == "FIXED" {
|
||||
b.version = m[5]
|
||||
} else {
|
||||
switch m[1] {
|
||||
case "draft", "references", "alt", "validSubLocales", "standard" /* in Common */ :
|
||||
case "type", "choice":
|
||||
default:
|
||||
el.attr = append(el.attr, &attribute{
|
||||
name: m[1],
|
||||
key: s,
|
||||
list: reToken.FindAllString(m[3], -1),
|
||||
})
|
||||
el.signature = fmt.Sprintf("%s=%s+%s", el.signature, m[1], m[2])
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// reCat matches one token of an element's category string: an optional
// separator run, then a parenthesis or a (possibly #-prefixed) name,
// then an optional repetition marker (*, + or ?).
var reCat = regexp.MustCompile(`[ ,\|]*(?:(\(|\)|\#?[\w_-]+)([\*\+\?]?))?`)

// resolve takes a parsed element and converts it into structured data
// that can be used to generate the XML code.
func (b *builder) resolve(e *element) {
	if e.resolved {
		return
	}
	b.elem = append(b.elem, e)
	e.resolved = true
	s := e.category
	found := make(map[string]bool)
	// sequenceStart is a stack of indexes into e.sub marking where each
	// currently-open parenthesized group began.
	sequenceStart := []int{}
	for len(s) > 0 {
		m := reCat.FindStringSubmatch(s)
		if m == nil {
			log.Fatalf("%s: invalid category string %q", e.name, s)
		}
		// A token repeats if marked * or + or if the DTD configuration
		// forces repetition for this name.
		repeat := m[2] == "*" || m[2] == "+" || in(b.info.forceRepeat, m[1])
		switch m[1] {
		case "":
		case "(":
			sequenceStart = append(sequenceStart, len(e.sub))
		case ")":
			if len(sequenceStart) == 0 {
				log.Fatalf("%s: unmatched closing parenthesis", e.name)
			}
			// Propagate the group's repetition marker to every element
			// added since the matching open parenthesis.
			for i := sequenceStart[len(sequenceStart)-1]; i < len(e.sub); i++ {
				e.sub[i].repeat = e.sub[i].repeat || repeat
			}
			sequenceStart = sequenceStart[:len(sequenceStart)-1]
		default:
			if in(b.info.skipElem, m[1]) {
				// Explicitly skipped element: ignore.
			} else if sub, ok := b.index[m[1]]; ok {
				// Record each distinct sub-element once and resolve it
				// recursively.
				if !found[sub.name] {
					e.sub = append(e.sub, struct {
						e      *element
						repeat bool
					}{sub, repeat})
					found[sub.name] = true
					b.resolve(sub)
				}
			} else if m[1] == "#PCDATA" || m[1] == "ANY" {
				// Character data / any-content markers carry no structure.
			} else if m[1] != "EMPTY" {
				log.Fatalf("resolve:%s: element %q not found", e.name, m[1])
			}
		}
		s = s[len(m[0]):]
	}
}
|
||||
|
||||
// in reports whether s occurs in set.
func in(set []string, s string) bool {
	for i := range set {
		if set[i] == s {
			return true
		}
	}
	return false
}
|
||||
|
||||
// repl maps '-' and '_' to spaces so strings.Title can capitalize each
// word-like component.
var repl = strings.NewReplacer("-", " ", "_", " ")

// title converts s to CamelCase: each part separated by '-' or '_' is
// title-cased and the separators are removed.
func title(s string) string {
	spaced := repl.Replace(s)
	titled := strings.Title(spaced)
	return strings.Replace(titled, " ", "", -1)
}
|
||||
|
||||
// writeElem generates Go code for a single element, recursively.
func (b *builder) writeElem(tab int, e *element) {
	// p prints f with every embedded newline indented to the current
	// tab depth.
	p := func(f string, x ...interface{}) {
		f = strings.Replace(f, "\n", "\n"+strings.Repeat("\t", tab), -1)
		fmt.Fprintf(b.w, f, x...)
	}
	if len(e.sub) == 0 && len(e.attr) == 0 {
		// Leaf element: the embedded Common struct suffices.
		p("Common")
		return
	}
	p("struct {")
	tab++
	p("\nCommon")
	// One string field per attribute that is not explicitly skipped.
	for _, attr := range e.attr {
		if !in(b.info.skipAttr, attr.name) {
			p("\n%s string `xml:\"%s,attr\"`", title(attr.name), attr.name)
		}
	}
	for _, sub := range e.sub {
		if in(b.info.predefined, sub.e.name) {
			// Use the hand-written <name>Elem type instead of generating one.
			p("\n%sElem", sub.e.name)
			continue
		}
		if in(b.info.skipElem, sub.e.name) {
			continue
		}
		p("\n%s ", title(sub.e.name))
		// Repeated sub-elements become slices of pointers.
		if sub.repeat {
			p("[]")
		}
		p("*")
		if in(b.info.top, sub.e.name) {
			// Top-level elements get a named type; refer to it by name.
			p(title(sub.e.name))
		} else {
			// Otherwise inline the sub-element's struct recursively.
			b.writeElem(tab, sub.e)
		}
		p(" `xml:\"%s\"`", sub.e.name)
	}
	tab--
	p("\n}")
}
|
||||
|
||||
// write generates the Go XML code.
|
||||
func (b *builder) write() {
|
||||
for i, name := range b.info.top {
|
||||
e := b.index[name]
|
||||
if e != nil {
|
||||
fmt.Fprintf(b.w, comments[name])
|
||||
name := title(e.name)
|
||||
if i == 0 {
|
||||
name = b.info.root
|
||||
}
|
||||
fmt.Fprintf(b.w, "type %s ", name)
|
||||
b.writeElem(0, e)
|
||||
fmt.Fprint(b.w, "\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
602
vendor/golang.org/x/text/unicode/cldr/resolve.go
generated
vendored
Normal file
602
vendor/golang.org/x/text/unicode/cldr/resolve.go
generated
vendored
Normal file
@@ -0,0 +1,602 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
// This file implements the various inheritance constructs defined by LDML.
|
||||
// See http://www.unicode.org/reports/tr35/#Inheritance_and_Validity
|
||||
// for more details.
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// fieldIter iterates over fields in a struct. It includes
// fields of embedded structs.
type fieldIter struct {
	// v is the struct being iterated.
	v reflect.Value
	// index is the current field path (one entry per embedding level);
	// n holds the field count of each level.
	index, n []int
}

// iter returns a fieldIter positioned at the first field of struct v.
// It panics if v is not a struct.
func iter(v reflect.Value) fieldIter {
	if v.Kind() != reflect.Struct {
		log.Panicf("value %v must be a struct", v)
	}
	i := fieldIter{
		v:     v,
		index: []int{0},
		n:     []int{v.NumField()},
	}
	i.descent()
	return i
}

// descent pushes into embedded (anonymous) structs so that iteration
// visits their fields in place.
func (i *fieldIter) descent() {
	for f := i.field(); f.Anonymous && f.Type.NumField() > 0; f = i.field() {
		i.index = append(i.index, 0)
		i.n = append(i.n, f.Type.NumField())
	}
}

// done reports whether the iterator has moved past the last field.
func (i *fieldIter) done() bool {
	return len(i.index) == 1 && i.index[0] >= i.n[0]
}
|
||||
|
||||
func skip(f reflect.StructField) bool {
|
||||
return !f.Anonymous && (f.Name[0] < 'A' || f.Name[0] > 'Z')
|
||||
}
|
||||
|
||||
// next advances the iterator to the next visitable field, skipping
// unexported fields and popping out of embedded structs whose fields
// are exhausted.
func (i *fieldIter) next() {
	for {
		k := len(i.index) - 1
		i.index[k]++
		if i.index[k] < i.n[k] {
			if !skip(i.field()) {
				break
			}
		} else {
			if k == 0 {
				// Top level exhausted; done() now reports true.
				return
			}
			// Pop back out of the current embedded struct.
			i.index = i.index[:k]
			i.n = i.n[:k]
		}
	}
	// Descend into the new field if it is itself an embedded struct.
	i.descent()
}
|
||||
|
||||
// value returns the reflect.Value of the current field.
func (i *fieldIter) value() reflect.Value {
	return i.v.FieldByIndex(i.index)
}

// field returns the reflect.StructField describing the current field.
func (i *fieldIter) field() reflect.StructField {
	return i.v.Type().FieldByIndex(i.index)
}
|
||||
|
||||
// visitor is a callback invoked for every node reached by visit.
type visitor func(v reflect.Value) error

// stopDescent is a sentinel error a visitor can return to prune
// recursion below the current node without aborting the walk.
var stopDescent = fmt.Errorf("do not recurse")

// visit applies f to all nodes reachable from x.
func (f visitor) visit(x interface{}) error {
	return f.visitRec(reflect.ValueOf(x))
}

// visitRec recursively calls f on all nodes in v, following non-nil
// pointers and descending into struct fields and slice elements.
func (f visitor) visitRec(v reflect.Value) error {
	if v.Kind() == reflect.Ptr {
		if v.IsNil() {
			return nil
		}
		return f.visitRec(v.Elem())
	}
	if err := f(v); err != nil {
		// stopDescent suppresses recursion but is not an error.
		if err == stopDescent {
			return nil
		}
		return err
	}
	switch v.Kind() {
	case reflect.Struct:
		for i := iter(v); !i.done(); i.next() {
			if err := f.visitRec(i.value()); err != nil {
				return err
			}
		}
	case reflect.Slice:
		for i := 0; i < v.Len(); i++ {
			if err := f.visitRec(v.Index(i)); err != nil {
				return err
			}
		}
	}
	return nil
}
|
||||
|
||||
// getPath is used for error reporting purposes only.
|
||||
func getPath(e Elem) string {
|
||||
if e == nil {
|
||||
return "<nil>"
|
||||
}
|
||||
if e.enclosing() == nil {
|
||||
return e.GetCommon().name
|
||||
}
|
||||
if e.GetCommon().Type == "" {
|
||||
return fmt.Sprintf("%s.%s", getPath(e.enclosing()), e.GetCommon().name)
|
||||
}
|
||||
return fmt.Sprintf("%s.%s[type=%s]", getPath(e.enclosing()), e.GetCommon().name, e.GetCommon().Type)
|
||||
}
|
||||
|
||||
// xmlName returns the xml name of the element or attribute
|
||||
func xmlName(f reflect.StructField) (name string, attr bool) {
|
||||
tags := strings.Split(f.Tag.Get("xml"), ",")
|
||||
for _, s := range tags {
|
||||
attr = attr || s == "attr"
|
||||
}
|
||||
return tags[0], attr
|
||||
}
|
||||
|
||||
func findField(v reflect.Value, key string) (reflect.Value, error) {
|
||||
v = reflect.Indirect(v)
|
||||
for i := iter(v); !i.done(); i.next() {
|
||||
if n, _ := xmlName(i.field()); n == key {
|
||||
return i.value(), nil
|
||||
}
|
||||
}
|
||||
return reflect.Value{}, fmt.Errorf("cldr: no field %q in element %#v", key, v.Interface())
|
||||
}
|
||||
|
||||
var xpathPart = regexp.MustCompile(`(\pL+)(?:\[@(\pL+)='([\w-]+)'\])?`)
|
||||
|
||||
func walkXPath(e Elem, path string) (res Elem, err error) {
|
||||
for _, c := range strings.Split(path, "/") {
|
||||
if c == ".." {
|
||||
if e = e.enclosing(); e == nil {
|
||||
panic("path ..")
|
||||
return nil, fmt.Errorf(`cldr: ".." moves past root in path %q`, path)
|
||||
}
|
||||
continue
|
||||
} else if c == "" {
|
||||
continue
|
||||
}
|
||||
m := xpathPart.FindStringSubmatch(c)
|
||||
if len(m) == 0 || len(m[0]) != len(c) {
|
||||
return nil, fmt.Errorf("cldr: syntax error in path component %q", c)
|
||||
}
|
||||
v, err := findField(reflect.ValueOf(e), m[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
switch v.Kind() {
|
||||
case reflect.Slice:
|
||||
i := 0
|
||||
if m[2] != "" || v.Len() > 1 {
|
||||
if m[2] == "" {
|
||||
m[2] = "type"
|
||||
if m[3] = e.GetCommon().Default(); m[3] == "" {
|
||||
return nil, fmt.Errorf("cldr: type selector or default value needed for element %s", m[1])
|
||||
}
|
||||
}
|
||||
for ; i < v.Len(); i++ {
|
||||
vi := v.Index(i)
|
||||
key, err := findField(vi.Elem(), m[2])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
key = reflect.Indirect(key)
|
||||
if key.Kind() == reflect.String && key.String() == m[3] {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
if i == v.Len() || v.Index(i).IsNil() {
|
||||
return nil, fmt.Errorf("no %s found with %s==%s", m[1], m[2], m[3])
|
||||
}
|
||||
e = v.Index(i).Interface().(Elem)
|
||||
case reflect.Ptr:
|
||||
if v.IsNil() {
|
||||
return nil, fmt.Errorf("cldr: element %q not found within element %q", m[1], e.GetCommon().name)
|
||||
}
|
||||
var ok bool
|
||||
if e, ok = v.Interface().(Elem); !ok {
|
||||
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
|
||||
} else if m[2] != "" || m[3] != "" {
|
||||
return nil, fmt.Errorf("cldr: no type selector allowed for element %s", m[1])
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("cldr: %q is not an XML element", m[1])
|
||||
}
|
||||
}
|
||||
return e, nil
|
||||
}
|
||||
|
||||
// absPrefix is the required prefix of absolute alias paths.
const absPrefix = "//ldml/"

// resolveAlias resolves the alias path for e. For source "locale" the
// path is relative to e; otherwise it must be absolute and is resolved
// against the root of the (fully resolved) locale named by src.
func (cldr *CLDR) resolveAlias(e Elem, src, path string) (res Elem, err error) {
	if src != "locale" {
		if !strings.HasPrefix(path, absPrefix) {
			return nil, fmt.Errorf("cldr: expected absolute path, found %q", path)
		}
		path = path[len(absPrefix):]
		if e, err = cldr.resolve(src); err != nil {
			return nil, err
		}
	}
	return walkXPath(e, path)
}
|
||||
|
||||
// resolveAndMergeAlias resolves the alias on e, if any, and copies the
// aliased element's set (non-nil), non-attribute fields into e.
func (cldr *CLDR) resolveAndMergeAlias(e Elem) error {
	alias := e.GetCommon().Alias
	if alias == nil {
		return nil
	}
	a, err := cldr.resolveAlias(e, alias.Source, alias.Path)
	if err != nil {
		return fmt.Errorf("%v: error evaluating path %q: %v", getPath(e), alias.Path, err)
	}
	// Ensure alias node was already evaluated. TODO: avoid double evaluation.
	err = cldr.resolveAndMergeAlias(a)
	v := reflect.ValueOf(e).Elem()
	for i := iter(reflect.ValueOf(a).Elem()); !i.done(); i.next() {
		// Copy only non-nil element fields; attributes keep the values
		// of e itself.
		if vv := i.value(); vv.Kind() != reflect.Ptr || !vv.IsNil() {
			if _, attr := xmlName(i.field()); !attr {
				v.FieldByIndex(i.index).Set(vv)
			}
		}
	}
	return err
}

// aliasResolver returns a visitor that merges aliases into every
// element it visits, pruning descent below blocking elements.
func (cldr *CLDR) aliasResolver() visitor {
	return func(v reflect.Value) (err error) {
		if e, ok := v.Addr().Interface().(Elem); ok {
			err = cldr.resolveAndMergeAlias(e)
			if err == nil && blocking[e.GetCommon().name] {
				return stopDescent
			}
		}
		return err
	}
}
|
||||
|
||||
// elements within blocking elements do not inherit.
// Taken from CLDR's supplementalMetaData.xml.
var blocking = map[string]bool{
	"identity":         true,
	"supplementalData": true,
	"cldrTest":         true,
	"collation":        true,
	"transform":        true,
}

// Distinguishing attributes affect inheritance; two elements with different
// distinguishing attributes are treated as different for purposes of inheritance,
// except when such attributes occur in the indicated elements.
// Taken from CLDR's supplementalMetaData.xml.
// A nil value means the attribute distinguishes in all elements.
var distinguishing = map[string][]string{
	"key":        nil,
	"request_id": nil,
	"id":         nil,
	"registry":   nil,
	"alt":        nil,
	"iso4217":    nil,
	"iso3166":    nil,
	"mzone":      nil,
	"from":       nil,
	"to":         nil,
	"type": []string{
		"abbreviationFallback",
		"default",
		"mapping",
		"measurementSystem",
		"preferenceOrdering",
	},
	"numberSystem": nil,
}
|
||||
|
||||
// in reports whether s is contained in set.
func in(set []string, s string) bool {
	for i := range set {
		if set[i] == s {
			return true
		}
	}
	return false
}
|
||||
|
||||
// attrKey computes a key based on the distinguishing attributes of
// an element and its values.
func attrKey(v reflect.Value, exclude ...string) string {
	parts := []string{}
	ename := v.Interface().(Elem).GetCommon().name
	v = v.Elem()
	for i := iter(v); !i.done(); i.next() {
		if name, attr := xmlName(i.field()); attr {
			// Include only distinguishing attributes, minus explicit
			// exclusions and per-element exceptions.
			if except, ok := distinguishing[name]; ok && !in(exclude, name) && !in(except, ename) {
				v := i.value()
				if v.Kind() == reflect.Ptr {
					v = v.Elem()
				}
				if v.IsValid() {
					parts = append(parts, fmt.Sprintf("%s=%s", name, v.String()))
				}
			}
		}
	}
	// Sort for a canonical, field-order-independent key.
	sort.Strings(parts)
	return strings.Join(parts, ";")
}
|
||||
|
||||
// Key returns a key for e derived from all distinguishing attributes
|
||||
// except those specified by exclude.
|
||||
func Key(e Elem, exclude ...string) string {
|
||||
return attrKey(reflect.ValueOf(e), exclude...)
|
||||
}
|
||||
|
||||
// linkEnclosing sets the enclosing element as well as the name
// for all sub-elements of child, recursively.
func linkEnclosing(parent, child Elem) {
	child.setEnclosing(parent)
	v := reflect.ValueOf(child).Elem()
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		if vf.Kind() == reflect.Slice {
			// Each slice element is itself an Elem to link.
			for j := 0; j < vf.Len(); j++ {
				linkEnclosing(child, vf.Index(j).Interface().(Elem))
			}
		} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
			linkEnclosing(child, vf.Interface().(Elem))
		}
	}
}

// setNames sets e's name to name and recursively names each
// sub-element after its xml struct-tag name.
func setNames(e Elem, name string) {
	e.setName(name)
	v := reflect.ValueOf(e).Elem()
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		name, _ = xmlName(i.field())
		if vf.Kind() == reflect.Slice {
			for j := 0; j < vf.Len(); j++ {
				setNames(vf.Index(j).Interface().(Elem), name)
			}
		} else if vf.Kind() == reflect.Ptr && !vf.IsNil() && vf.Elem().Kind() == reflect.Struct {
			setNames(vf.Interface().(Elem), name)
		}
	}
}
|
||||
|
||||
// deepCopy copies elements of v recursively. All elements of v that may
// be modified by inheritance are explicitly copied.
func deepCopy(v reflect.Value) reflect.Value {
	switch v.Kind() {
	case reflect.Ptr:
		// Non-struct pointers (and nil) are shared, not copied.
		if v.IsNil() || v.Elem().Kind() != reflect.Struct {
			return v
		}
		nv := reflect.New(v.Elem().Type())
		nv.Elem().Set(v.Elem())
		deepCopyRec(nv.Elem(), v.Elem())
		return nv
	case reflect.Slice:
		nv := reflect.MakeSlice(v.Type(), v.Len(), v.Len())
		for i := 0; i < v.Len(); i++ {
			deepCopyRec(nv.Index(i), v.Index(i))
		}
		return nv
	}
	panic("deepCopy: must be called with pointer or slice")
}

// deepCopyRec is only called by deepCopy.
func deepCopyRec(nv, v reflect.Value) {
	if v.Kind() == reflect.Struct {
		t := v.Type()
		for i := 0; i < v.NumField(); i++ {
			// Recurse into element fields only; attribute fields were
			// already duplicated by the shallow struct copy.
			if name, attr := xmlName(t.Field(i)); name != "" && !attr {
				deepCopyRec(nv.Field(i), v.Field(i))
			}
		}
	} else {
		nv.Set(deepCopy(v))
	}
}
|
||||
|
||||
// newNode is used to insert a missing node during inheritance.
// It copies only the attribute (and untagged) fields of v into a
// fresh value and links the result to enclosing element enc.
func (cldr *CLDR) newNode(v, enc reflect.Value) reflect.Value {
	n := reflect.New(v.Type())
	for i := iter(v); !i.done(); i.next() {
		if name, attr := xmlName(i.field()); name == "" || attr {
			n.Elem().FieldByIndex(i.index).Set(i.value())
		}
	}
	n.Interface().(Elem).GetCommon().setEnclosing(enc.Addr().Interface().(Elem))
	return n
}
|
||||
|
||||
// inheritFields merges parent's fields into a copy of v and returns a
// pointer to the merged value. v and parent are struct values (the
// callers pass dereferenced pointers; iter panics on non-structs).
func (cldr *CLDR) inheritFields(v, parent reflect.Value) (res reflect.Value, err error) {
	t := v.Type()
	nv := reflect.New(t)
	nv.Elem().Set(v)
	for i := iter(v); !i.done(); i.next() {
		vf := i.value()
		f := i.field()
		name, attr := xmlName(f)
		// Attributes and untagged fields are not inherited.
		if name == "" || attr {
			continue
		}
		pf := parent.FieldByIndex(i.index)
		if blocking[name] {
			// Blocking elements do not inherit field-by-field: take the
			// child's value if set, otherwise a deep copy of the parent's.
			if vf.IsNil() {
				vf = pf
			}
			nv.Elem().FieldByIndex(i.index).Set(deepCopy(vf))
			continue
		}
		switch f.Type.Kind() {
		case reflect.Ptr:
			if f.Type.Elem().Kind() == reflect.Struct {
				if !vf.IsNil() {
					// Both sides may exist: inherit recursively.
					if vf, err = cldr.inheritStructPtr(vf, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				} else if !pf.IsNil() {
					// Child missing: synthesize a node from the parent.
					n := cldr.newNode(pf.Elem(), v)
					if vf, err = cldr.inheritStructPtr(n, pf); err != nil {
						return reflect.Value{}, err
					}
					vf.Interface().(Elem).setEnclosing(nv.Interface().(Elem))
					nv.Elem().FieldByIndex(i.index).Set(vf)
				}
			}
		case reflect.Slice:
			vf, err := cldr.inheritSlice(nv.Elem(), vf, pf)
			if err != nil {
				return reflect.Zero(t), err
			}
			nv.Elem().FieldByIndex(i.index).Set(vf)
		}
	}
	return nv, nil
}
|
||||
|
||||
func root(e Elem) *LDML {
|
||||
for ; e.enclosing() != nil; e = e.enclosing() {
|
||||
}
|
||||
return e.(*LDML)
|
||||
}
|
||||
|
||||
// inheritStructPtr first merges possible aliases in with v and then inherits
// any underspecified elements from parent.
func (cldr *CLDR) inheritStructPtr(v, parent reflect.Value) (r reflect.Value, err error) {
	if !v.IsNil() {
		e := v.Interface().(Elem).GetCommon()
		alias := e.Alias
		// Fall back to the parent's alias if the child has none.
		if alias == nil && !parent.IsNil() {
			alias = parent.Interface().(Elem).GetCommon().Alias
		}
		if alias != nil {
			// NOTE(review): the error from resolveAlias is shadowed and
			// silently dropped when a == nil — presumably deliberate
			// best-effort behavior, but worth confirming upstream.
			a, err := cldr.resolveAlias(v.Interface().(Elem), alias.Source, alias.Path)
			if a != nil {
				if v, err = cldr.inheritFields(v.Elem(), reflect.ValueOf(a).Elem()); err != nil {
					return reflect.Value{}, err
				}
			}
		}
		if !parent.IsNil() {
			return cldr.inheritFields(v.Elem(), parent.Elem())
		}
	} else if parent.IsNil() {
		// Callers guarantee at least one side is non-nil.
		panic("should not reach here")
	}
	return v, nil
}
|
||||
|
||||
// Must be slice of struct pointers.
|
||||
func (cldr *CLDR) inheritSlice(enc, v, parent reflect.Value) (res reflect.Value, err error) {
|
||||
t := v.Type()
|
||||
index := make(map[string]reflect.Value)
|
||||
if !v.IsNil() {
|
||||
for i := 0; i < v.Len(); i++ {
|
||||
vi := v.Index(i)
|
||||
key := attrKey(vi)
|
||||
index[key] = vi
|
||||
}
|
||||
}
|
||||
if !parent.IsNil() {
|
||||
for i := 0; i < parent.Len(); i++ {
|
||||
vi := parent.Index(i)
|
||||
key := attrKey(vi)
|
||||
if w, ok := index[key]; ok {
|
||||
index[key], err = cldr.inheritStructPtr(w, vi)
|
||||
} else {
|
||||
n := cldr.newNode(vi.Elem(), enc)
|
||||
index[key], err = cldr.inheritStructPtr(n, vi)
|
||||
}
|
||||
index[key].Interface().(Elem).setEnclosing(enc.Addr().Interface().(Elem))
|
||||
if err != nil {
|
||||
return v, err
|
||||
}
|
||||
}
|
||||
}
|
||||
keys := make([]string, 0, len(index))
|
||||
for k, _ := range index {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
sl := reflect.MakeSlice(t, len(index), len(index))
|
||||
for i, k := range keys {
|
||||
sl.Index(i).Set(index[k])
|
||||
}
|
||||
return sl, nil
|
||||
}
|
||||
|
||||
// parentLocale returns the truncation-based parent of locale loc:
// the locale with the last underscore-separated part removed, or
// "root" when there is nothing left to drop.
func parentLocale(loc string) string {
	parts := strings.Split(loc, "_")
	if len(parts) == 1 {
		return "root"
	}
	return strings.Join(parts[:len(parts)-1], "_")
}
|
||||
|
||||
// resolve returns the fully inherited LDML for locale loc, computing
// and caching it on first use.
func (cldr *CLDR) resolve(loc string) (res *LDML, err error) {
	if r := cldr.resolved[loc]; r != nil {
		return r, nil
	}
	x := cldr.RawLDML(loc)
	if x == nil {
		return nil, fmt.Errorf("cldr: unknown locale %q", loc)
	}
	var v reflect.Value
	if loc == "root" {
		// Root has no parent: copy it and resolve its aliases.
		x = deepCopy(reflect.ValueOf(x)).Interface().(*LDML)
		linkEnclosing(nil, x)
		err = cldr.aliasResolver().visit(x)
	} else {
		// Walk up the truncation chain to the nearest locale that is
		// actually present, resolve it, then inherit from it.
		key := parentLocale(loc)
		var parent *LDML
		for ; cldr.locale[key] == nil; key = parentLocale(key) {
		}
		if parent, err = cldr.resolve(key); err != nil {
			return nil, err
		}
		v, err = cldr.inheritFields(reflect.ValueOf(x).Elem(), reflect.ValueOf(parent).Elem())
		x = v.Interface().(*LDML)
		linkEnclosing(nil, x)
	}
	if err != nil {
		return nil, err
	}
	cldr.resolved[loc] = x
	return x, err
}
|
||||
|
||||
// finalize finalizes the initialization of the raw LDML structs. It also
// removes unwanted fields, as specified by filter, so that they will not
// be unnecessarily evaluated.
func (cldr *CLDR) finalize(filter []string) {
	for _, x := range cldr.locale {
		if filter != nil {
			v := reflect.ValueOf(x).Elem()
			t := v.Type()
			for i := 0; i < v.NumField(); i++ {
				f := t.Field(i)
				name, _ := xmlName(f)
				// Keep identity and any field named in filter; zero the rest.
				if name != "" && name != "identity" && !in(filter, name) {
					v.Field(i).Set(reflect.Zero(f.Type))
				}
			}
		}
		linkEnclosing(nil, x) // for resolving aliases and paths
		setNames(x, "ldml")
	}
}
|
||||
144
vendor/golang.org/x/text/unicode/cldr/slice.go
generated
vendored
Normal file
144
vendor/golang.org/x/text/unicode/cldr/slice.go
generated
vendored
Normal file
@@ -0,0 +1,144 @@
|
||||
// Copyright 2013 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package cldr
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Slice provides utilities for modifying slices of elements.
// It can be wrapped around any slice of which the element type implements
// interface Elem.
type Slice struct {
	ptr reflect.Value // pointer to the wrapped slice
	typ reflect.Type  // struct type of the slice's elements (pointer stripped)
}

// Value returns the reflect.Value of the underlying slice.
func (s *Slice) Value() reflect.Value {
	return s.ptr.Elem()
}
|
||||
|
||||
// MakeSlice wraps a pointer to a slice of Elems.
// It replaces the array pointed to by the slice so that subsequent modifications
// do not alter the data in a CLDR type.
// It panics if an incorrect type is passed.
func MakeSlice(slicePtr interface{}) Slice {
	ptr := reflect.ValueOf(slicePtr)
	if ptr.Kind() != reflect.Ptr {
		panic(fmt.Sprintf("MakeSlice: argument must be pointer to slice, found %v", ptr.Type()))
	}
	sl := ptr.Elem()
	if sl.Kind() != reflect.Slice {
		panic(fmt.Sprintf("MakeSlice: argument must point to a slice, found %v", sl.Type()))
	}
	intf := reflect.TypeOf((*Elem)(nil)).Elem()
	if !sl.Type().Elem().Implements(intf) {
		panic(fmt.Sprintf("MakeSlice: element type of slice (%v) does not implement Elem", sl.Type().Elem()))
	}
	// Replace the backing array so the caller may mutate freely.
	nsl := reflect.MakeSlice(sl.Type(), sl.Len(), sl.Len())
	reflect.Copy(nsl, sl)
	sl.Set(nsl)
	return Slice{
		ptr: ptr,
		typ: sl.Type().Elem().Elem(),
	}
}

// indexForAttr returns the field index path of attribute a within the
// slice's element type, panicking if no such attribute exists.
func (s Slice) indexForAttr(a string) []int {
	for i := iter(reflect.Zero(s.typ)); !i.done(); i.next() {
		if n, _ := xmlName(i.field()); n == a {
			return i.index
		}
	}
	panic(fmt.Sprintf("MakeSlice: no attribute %q for type %v", a, s.typ))
}
|
||||
|
||||
// Filter filters s to only include elements for which fn returns true.
|
||||
func (s Slice) Filter(fn func(e Elem) bool) {
|
||||
k := 0
|
||||
sl := s.Value()
|
||||
for i := 0; i < sl.Len(); i++ {
|
||||
vi := sl.Index(i)
|
||||
if fn(vi.Interface().(Elem)) {
|
||||
sl.Index(k).Set(vi)
|
||||
k++
|
||||
}
|
||||
}
|
||||
sl.Set(sl.Slice(0, k))
|
||||
}
|
||||
|
||||
// Group finds elements in s for which fn returns the same value and groups
|
||||
// them in a new Slice.
|
||||
func (s Slice) Group(fn func(e Elem) string) []Slice {
|
||||
m := make(map[string][]reflect.Value)
|
||||
sl := s.Value()
|
||||
for i := 0; i < sl.Len(); i++ {
|
||||
vi := sl.Index(i)
|
||||
key := fn(vi.Interface().(Elem))
|
||||
m[key] = append(m[key], vi)
|
||||
}
|
||||
keys := []string{}
|
||||
for k, _ := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
res := []Slice{}
|
||||
for _, k := range keys {
|
||||
nsl := reflect.New(sl.Type())
|
||||
nsl.Elem().Set(reflect.Append(nsl.Elem(), m[k]...))
|
||||
res = append(res, MakeSlice(nsl.Interface()))
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
// SelectAnyOf filters s to contain only elements for which attr matches
|
||||
// any of the values.
|
||||
func (s Slice) SelectAnyOf(attr string, values ...string) {
|
||||
index := s.indexForAttr(attr)
|
||||
s.Filter(func(e Elem) bool {
|
||||
vf := reflect.ValueOf(e).Elem().FieldByIndex(index)
|
||||
return in(values, vf.String())
|
||||
})
|
||||
}
|
||||
|
||||
// SelectOnePerGroup filters s to include at most one element e per group of
// elements matching Key(attr), where e has an attribute a that matches any
// the values in v.
// If more than one element in a group matches a value in v preference
// is given to the element that matches the first value in v.
func (s Slice) SelectOnePerGroup(a string, v []string) {
	index := s.indexForAttr(a)
	grouped := s.Group(func(e Elem) string { return Key(e, a) })
	sl := s.Value()
	// Rebuild the slice from scratch, one winner per group.
	sl.Set(sl.Slice(0, 0))
	for _, g := range grouped {
		e := reflect.Value{}
		found := len(v) // index into v of the best match so far
		gsl := g.Value()
		for i := 0; i < gsl.Len(); i++ {
			vi := gsl.Index(i).Elem().FieldByIndex(index)
			j := 0
			for ; j < len(v) && v[j] != vi.String(); j++ {
			}
			// Prefer the element matching the earliest value in v.
			if j < found {
				found = j
				e = gsl.Index(i)
			}
		}
		if found < len(v) {
			sl.Set(reflect.Append(sl, e))
		}
	}
}

// SelectDraft drops all elements from the list with a draft level smaller than d
// and selects the highest draft level of the remaining.
// This method assumes that the input CLDR is canonicalized.
func (s Slice) SelectDraft(d Draft) {
	s.SelectOnePerGroup("draft", drafts[len(drafts)-2-int(d):])
}
|
||||
1456
vendor/golang.org/x/text/unicode/cldr/xml.go
generated
vendored
Normal file
1456
vendor/golang.org/x/text/unicode/cldr/xml.go
generated
vendored
Normal file
File diff suppressed because it is too large
Load Diff
514
vendor/golang.org/x/text/unicode/norm/composition.go
generated
vendored
Normal file
514
vendor/golang.org/x/text/unicode/norm/composition.go
generated
vendored
Normal file
@@ -0,0 +1,514 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
const (
	maxNonStarters = 30
	// The maximum number of characters needed for a buffer is
	// maxNonStarters + 1 for the starter + 1 for the GCJ
	maxBufferSize    = maxNonStarters + 2
	maxNFCExpansion  = 3  // NFC(0x1D160)
	maxNFKCExpansion = 18 // NFKC(0xFDFA)

	maxByteBufferSize = utf8.UTFMax * maxBufferSize // 128
)

// ssState is used for reporting the segment state after inserting a rune.
// It is returned by streamSafe.next.
type ssState int

const (
	// Indicates a rune was successfully added to the segment.
	ssSuccess ssState = iota
	// Indicates a rune starts a new segment and should not be added.
	ssStarter
	// Indicates a rune caused a segment overflow and a CGJ should be inserted.
	ssOverflow
)

// streamSafe implements the policy of when a CGJ should be inserted.
// Its value is the running count of non-starters in the current segment.
type streamSafe uint8

// mkStreamSafe is a shorthand for declaring a streamSafe var and calling
// first on it.
func mkStreamSafe(p Properties) streamSafe {
	return streamSafe(p.nTrailingNonStarters())
}
|
||||
|
||||
// first inserts the first rune of a segment. It panics if the counter
// was not reset to 0 beforehand.
func (ss *streamSafe) first(p Properties) {
	if *ss != 0 {
		panic("!= 0")
	}
	*ss = streamSafe(p.nTrailingNonStarters())
}

// next returns a ssState value to indicate whether a rune represented
// by p can be inserted into the current segment.
func (ss *streamSafe) next(p Properties) ssState {
	if *ss > maxNonStarters {
		panic("streamSafe was not reset")
	}
	n := p.nLeadingNonStarters()
	if *ss += streamSafe(n); *ss > maxNonStarters {
		*ss = 0
		return ssOverflow
	}
	// The Stream-Safe Text Processing prescribes that the counting can stop
	// as soon as a starter is encountered. However, there are some starters,
	// like Jamo V and T, that can combine with other runes, leaving their
	// successive non-starters appended to the previous, possibly causing an
	// overflow. We will therefore consider any rune with a non-zero nLead to
	// be a non-starter. Note that it always hold that if nLead > 0 then
	// nLead == nTrail.
	if n == 0 {
		*ss = 0
		return ssStarter
	}
	return ssSuccess
}

// backwards is used for checking for overflow and segment starts
// when traversing a string backwards. Users do not need to call first
// for the first rune. The state of the streamSafe retains the count of
// the non-starters loaded.
func (ss *streamSafe) backwards(p Properties) ssState {
	if *ss > maxNonStarters {
		panic("streamSafe was not reset")
	}
	c := *ss + streamSafe(p.nTrailingNonStarters())
	if c > maxNonStarters {
		// Note: unlike next, the counter is left unchanged on overflow.
		return ssOverflow
	}
	*ss = c
	if p.nLeadingNonStarters() == 0 {
		return ssStarter
	}
	return ssSuccess
}

// isMax reports whether the non-starter count has reached the
// stream-safe limit.
func (ss streamSafe) isMax() bool {
	return ss == maxNonStarters
}
|
||||
|
||||
// GraphemeJoiner is inserted after maxNonStarters non-starter runes.
const GraphemeJoiner = "\u034F"

// reorderBuffer is used to normalize a single segment. Characters inserted with
// insert are decomposed and reordered based on CCC. The compose method can
// be used to recombine characters. Note that the byte buffer does not hold
// the UTF-8 characters in order. Only the rune array is maintained in sorted
// order. flush writes the resulting segment to a byte array.
type reorderBuffer struct {
	rune  [maxBufferSize]Properties // Per character info.
	byte  [maxByteBufferSize]byte   // UTF-8 buffer. Referenced by runeInfo.pos.
	nbyte uint8                     // Number or bytes.
	ss    streamSafe                // For limiting length of non-starter sequence.
	nrune int                       // Number of runeInfos.
	f     formInfo

	src      input // current input source (bytes or string)
	nsrc     int   // length of src in bytes
	tmpBytes input

	out    []byte                  // destination for flushed segments
	flushF func(*reorderBuffer) bool // segment flush strategy
}
|
||||
|
||||
// init (re)initializes the buffer for Form f over the byte slice src.
func (rb *reorderBuffer) init(f Form, src []byte) {
	rb.f = *formTable[f]
	rb.src.setBytes(src)
	rb.nsrc = len(src)
	rb.ss = 0
}

// initString is the string variant of init.
func (rb *reorderBuffer) initString(f Form, src string) {
	rb.f = *formTable[f]
	rb.src.setString(src)
	rb.nsrc = len(src)
	rb.ss = 0
}

// setFlusher configures the destination and flush function used by doFlush.
func (rb *reorderBuffer) setFlusher(out []byte, f func(*reorderBuffer) bool) {
	rb.out = out
	rb.flushF = f
}
|
||||
|
||||
// reset discards all characters from the buffer.
func (rb *reorderBuffer) reset() {
	rb.nrune = 0
	rb.nbyte = 0
	rb.ss = 0
}

// doFlush recomposes the buffer if the form is composing, hands the segment
// to the configured flush function, and resets the buffer.
// It returns the flush function's result.
func (rb *reorderBuffer) doFlush() bool {
	if rb.f.composing {
		rb.compose()
	}
	res := rb.flushF(rb)
	rb.reset()
	return res
}
|
||||
|
||||
// appendFlush appends the normalized segment to rb.out.
|
||||
func appendFlush(rb *reorderBuffer) bool {
|
||||
for i := 0; i < rb.nrune; i++ {
|
||||
start := rb.rune[i].pos
|
||||
end := start + rb.rune[i].size
|
||||
rb.out = append(rb.out, rb.byte[start:end]...)
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// flush appends the normalized segment to out and resets rb.
|
||||
func (rb *reorderBuffer) flush(out []byte) []byte {
|
||||
for i := 0; i < rb.nrune; i++ {
|
||||
start := rb.rune[i].pos
|
||||
end := start + rb.rune[i].size
|
||||
out = append(out, rb.byte[start:end]...)
|
||||
}
|
||||
rb.reset()
|
||||
return out
|
||||
}
|
||||
|
||||
// flushCopy copies the normalized segment to buf and resets rb.
|
||||
// It returns the number of bytes written to buf.
|
||||
func (rb *reorderBuffer) flushCopy(buf []byte) int {
|
||||
p := 0
|
||||
for i := 0; i < rb.nrune; i++ {
|
||||
runep := rb.rune[i]
|
||||
p += copy(buf[p:], rb.byte[runep.pos:runep.pos+runep.size])
|
||||
}
|
||||
rb.reset()
|
||||
return p
|
||||
}
|
||||
|
||||
// insertOrdered inserts a rune in the buffer, ordered by Canonical Combining
// Class. The caller must have ensured there is room in the buffer.
// It is used internally by insert and insertString only.
func (rb *reorderBuffer) insertOrdered(info Properties) {
	n := rb.nrune
	b := rb.rune[:]
	cc := info.ccc
	if cc > 0 {
		// Find insertion position + move elements to make room.
		for ; n > 0; n-- {
			if b[n-1].ccc <= cc {
				break
			}
			b[n] = b[n-1]
		}
	}
	rb.nrune += 1
	pos := uint8(rb.nbyte)
	// Reserve a fixed utf8.UTFMax-sized byte slot per rune so entries can
	// later be rewritten in place during recomposition.
	rb.nbyte += utf8.UTFMax
	info.pos = pos
	b[n] = info
}
|
||||
|
||||
// insertErr is an error code returned by insert. Using this type instead
// of error improves performance up to 20% for many of the benchmarks.
type insertErr int

const (
	iSuccess  insertErr = -iota // insertion succeeded
	iShortDst                   // flush destination could not accept the segment
	iShortSrc                   // source ended prematurely
)
|
||||
|
||||
// insertFlush inserts the given rune in the buffer ordered by CCC.
// If a decomposition with multiple segments is encountered, the leading
// ones are flushed.
// It returns a non-zero error code if the rune was not inserted.
func (rb *reorderBuffer) insertFlush(src input, i int, info Properties) insertErr {
	// Hangul is decomposed algorithmically instead of through tables.
	if rune := src.hangul(i); rune != 0 {
		rb.decomposeHangul(rune)
		return iSuccess
	}
	if info.hasDecomposition() {
		return rb.insertDecomposed(info.Decomposition())
	}
	rb.insertSingle(src, i, info)
	return iSuccess
}
|
||||
|
||||
// insertUnsafe inserts the given rune in the buffer ordered by CCC.
// It is assumed there is sufficient space to hold the runes. It is the
// responsibility of the caller to ensure this. This can be done by checking
// the state returned by the streamSafe type.
func (rb *reorderBuffer) insertUnsafe(src input, i int, info Properties) {
	// Hangul is decomposed algorithmically instead of through tables.
	if rune := src.hangul(i); rune != 0 {
		rb.decomposeHangul(rune)
	}
	if info.hasDecomposition() {
		// TODO: inline.
		rb.insertDecomposed(info.Decomposition())
	} else {
		rb.insertSingle(src, i, info)
	}
}
|
||||
|
||||
// insertDecomposed inserts an entry in to the reorderBuffer for each rune
// in dcomp. dcomp must be a sequence of decomposed UTF-8-encoded runes.
// It flushes the buffer on each new segment start.
func (rb *reorderBuffer) insertDecomposed(dcomp []byte) insertErr {
	rb.tmpBytes.setBytes(dcomp)
	for i := 0; i < len(dcomp); {
		info := rb.f.info(rb.tmpBytes, i)
		if info.BoundaryBefore() && rb.nrune > 0 && !rb.doFlush() {
			// The flush destination could not accept the leading segment.
			return iShortDst
		}
		i += copy(rb.byte[rb.nbyte:], dcomp[i:i+int(info.size)])
		rb.insertOrdered(info)
	}
	return iSuccess
}
|
||||
|
||||
// insertSingle inserts an entry in the reorderBuffer for the rune at
// position i. info is the runeInfo for the rune at position i.
func (rb *reorderBuffer) insertSingle(src input, i int, info Properties) {
	src.copySlice(rb.byte[rb.nbyte:], i, i+int(info.size))
	rb.insertOrdered(info)
}

// insertCGJ inserts a Combining Grapheme Joiner (0x034f) into rb.
// It is used to break up overlong runs of non-starters.
func (rb *reorderBuffer) insertCGJ() {
	rb.insertSingle(input{str: GraphemeJoiner}, 0, Properties{size: uint8(len(GraphemeJoiner))})
}
|
||||
|
||||
// appendRune inserts a rune at the end of the buffer. It is used for Hangul.
func (rb *reorderBuffer) appendRune(r rune) {
	bn := rb.nbyte
	sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
	// Advance by a full utf8.UTFMax slot (not sz) so the entry can be
	// rewritten in place during recomposition.
	rb.nbyte += utf8.UTFMax
	rb.rune[rb.nrune] = Properties{pos: bn, size: uint8(sz)}
	rb.nrune++
}

// assignRune sets a rune at position pos. It is used for Hangul and recomposition.
func (rb *reorderBuffer) assignRune(pos int, r rune) {
	bn := rb.rune[pos].pos
	sz := utf8.EncodeRune(rb.byte[bn:], rune(r))
	rb.rune[pos] = Properties{pos: bn, size: uint8(sz)}
}
|
||||
|
||||
// runeAt returns the rune at position n. It is used for Hangul and recomposition.
|
||||
func (rb *reorderBuffer) runeAt(n int) rune {
|
||||
inf := rb.rune[n]
|
||||
r, _ := utf8.DecodeRune(rb.byte[inf.pos : inf.pos+inf.size])
|
||||
return r
|
||||
}
|
||||
|
||||
// bytesAt returns the UTF-8 encoding of the rune at position n.
|
||||
// It is used for Hangul and recomposition.
|
||||
func (rb *reorderBuffer) bytesAt(n int) []byte {
|
||||
inf := rb.rune[n]
|
||||
return rb.byte[inf.pos : int(inf.pos)+int(inf.size)]
|
||||
}
|
||||
|
||||
// For Hangul we combine algorithmically, instead of using tables.
const (
	hangulBase  = 0xAC00 // UTF-8(hangulBase) -> EA B0 80
	hangulBase0 = 0xEA
	hangulBase1 = 0xB0
	hangulBase2 = 0x80

	hangulEnd  = hangulBase + jamoLVTCount // UTF-8(0xD7A4) -> ED 9E A4
	hangulEnd0 = 0xED
	hangulEnd1 = 0x9E
	hangulEnd2 = 0xA4

	jamoLBase  = 0x1100 // UTF-8(jamoLBase) -> E1 84 00
	jamoLBase0 = 0xE1
	jamoLBase1 = 0x84
	jamoLEnd   = 0x1113
	jamoVBase  = 0x1161
	jamoVEnd   = 0x1176
	jamoTBase  = 0x11A7
	jamoTEnd   = 0x11C3

	// Jamo and precomposed-syllable counts used by the arithmetic
	// (de)composition of Hangul.
	jamoTCount   = 28
	jamoVCount   = 21
	jamoVTCount  = 21 * 28
	jamoLVTCount = 19 * 21 * 28
)

// hangulUTF8Size is the UTF-8 byte length of every precomposed Hangul
// syllable (all lie in the three-byte range U+AC00..U+D7A3).
const hangulUTF8Size = 3
||||
// isHangul reports whether the first rune encoded in b is a precomposed
// Hangul syllable (hangulBase <= r < hangulEnd). The check works directly
// on the UTF-8 bytes to avoid decoding the rune.
func isHangul(b []byte) bool {
	if len(b) < hangulUTF8Size {
		return false
	}
	b0 := b[0]
	if b0 < hangulBase0 {
		return false
	}
	b1 := b[1]
	switch {
	case b0 == hangulBase0:
		// Same lead byte as the lower bound: in range iff the second
		// byte is at least hangulBase1.
		return b1 >= hangulBase1
	case b0 < hangulEnd0:
		// Lead byte strictly between the bounds: always in range.
		return true
	case b0 > hangulEnd0:
		return false
	case b1 < hangulEnd1:
		return true
	}
	// Lead byte equals the upper bound: compare remaining bytes.
	return b1 == hangulEnd1 && b[2] < hangulEnd2
}
|
||||
|
||||
// isHangulString is the string variant of isHangul: it reports whether the
// first rune encoded in b is a precomposed Hangul syllable, comparing the
// UTF-8 bytes directly without decoding.
func isHangulString(b string) bool {
	if len(b) < hangulUTF8Size {
		return false
	}
	b0 := b[0]
	if b0 < hangulBase0 {
		return false
	}
	b1 := b[1]
	switch {
	case b0 == hangulBase0:
		// Same lead byte as the lower bound: in range iff the second
		// byte is at least hangulBase1.
		return b1 >= hangulBase1
	case b0 < hangulEnd0:
		// Lead byte strictly between the bounds: always in range.
		return true
	case b0 > hangulEnd0:
		return false
	case b1 < hangulEnd1:
		return true
	}
	// Lead byte equals the upper bound: compare remaining bytes.
	return b1 == hangulEnd1 && b[2] < hangulEnd2
}
|
||||
|
||||
// isJamoVT reports whether b starts with a conjoining Jamo rune
// (second byte 0x84..0x87, i.e. U+1100..U+11FF).
// Caller must ensure len(b) >= 2.
func isJamoVT(b []byte) bool {
	// True if (rune & 0xff00) == jamoLBase
	return b[0] == jamoLBase0 && (b[1]&0xFC) == jamoLBase1
}

// isHangulWithoutJamoT reports whether b starts with a precomposed LV
// syllable, i.e. a Hangul syllable without a trailing consonant (Jamo T).
func isHangulWithoutJamoT(b []byte) bool {
	c, _ := utf8.DecodeRune(b)
	c -= hangulBase
	// LV syllables fall at multiples of jamoTCount within the block.
	return c < jamoLVTCount && c%jamoTCount == 0
}
|
||||
|
||||
// decomposeHangul writes the decomposed Hangul to buf and returns the number
// of bytes written.  len(buf) should be at least 9.
func decomposeHangul(buf []byte, r rune) int {
	const JamoUTF8Len = 3
	r -= hangulBase
	x := r % jamoTCount // trailing consonant index; 0 means no Jamo T
	r /= jamoTCount
	utf8.EncodeRune(buf, jamoLBase+r/jamoVCount)
	utf8.EncodeRune(buf[JamoUTF8Len:], jamoVBase+r%jamoVCount)
	if x != 0 {
		// LVT syllable: also emit the trailing consonant.
		utf8.EncodeRune(buf[2*JamoUTF8Len:], jamoTBase+x)
		return 3 * JamoUTF8Len
	}
	return 2 * JamoUTF8Len
}
|
||||
|
||||
// decomposeHangul algorithmically decomposes a Hangul rune into
// its Jamo components, appending them to the buffer.
// See http://unicode.org/reports/tr15/#Hangul for details on decomposing Hangul.
func (rb *reorderBuffer) decomposeHangul(r rune) {
	r -= hangulBase
	x := r % jamoTCount // trailing consonant index; 0 means no Jamo T
	r /= jamoTCount
	rb.appendRune(jamoLBase + r/jamoVCount)
	rb.appendRune(jamoVBase + r%jamoVCount)
	if x != 0 {
		rb.appendRune(jamoTBase + x)
	}
}
|
||||
|
||||
// combineHangul algorithmically combines Jamo character components into Hangul.
// See http://unicode.org/reports/tr15/#Hangul for details on combining Hangul.
// s is the index of the current starter, i the rune being considered, and k
// the write position for kept runes; rb.nrune is truncated to k at the end.
func (rb *reorderBuffer) combineHangul(s, i, k int) {
	b := rb.rune[:]
	bn := rb.nrune
	for ; i < bn; i++ {
		cccB := b[k-1].ccc // CCC of the last kept rune
		cccC := b[i].ccc   // CCC of the current rune
		if cccB == 0 {
			// The last kept rune is a starter: new combining window.
			s = k - 1
		}
		if s != k-1 && cccB >= cccC {
			// b[i] is blocked by greater-equal cccX below it
			b[k] = b[i]
			k++
		} else {
			l := rb.runeAt(s) // also used to compare to hangulBase
			v := rb.runeAt(i) // also used to compare to jamoT
			switch {
			case jamoLBase <= l && l < jamoLEnd &&
				jamoVBase <= v && v < jamoVEnd:
				// 11xx plus 116x to LV
				rb.assignRune(s, hangulBase+
					(l-jamoLBase)*jamoVTCount+(v-jamoVBase)*jamoTCount)
			case hangulBase <= l && l < hangulEnd &&
				jamoTBase < v && v < jamoTEnd &&
				((l-hangulBase)%jamoTCount) == 0:
				// ACxx plus 11Ax to LVT
				rb.assignRune(s, l+v-jamoTBase)
			default:
				// No combination possible: keep the rune.
				b[k] = b[i]
				k++
			}
		}
	}
	rb.nrune = k
}
|
||||
|
||||
// compose recombines the runes in the buffer.
// It should only be used to recompose a single segment, as it will not
// handle alternations between Hangul and non-Hangul characters correctly.
func (rb *reorderBuffer) compose() {
	// UAX #15, section X5, including Corrigendum #5
	// "In any character sequence beginning with starter S, a character C is
	//  blocked from S if and only if there is some character B between S
	//  and C, and either B is a starter or it has the same or higher
	//  combining class as C."
	bn := rb.nrune
	if bn == 0 {
		return
	}
	k := 1
	b := rb.rune[:]
	// s is the index of the current starter, i the rune being considered,
	// and k the write position for runes that are kept (not combined away).
	for s, i := 0, 1; i < bn; i++ {
		if isJamoVT(rb.bytesAt(i)) {
			// Redo from start in Hangul mode. Necessary to support
			// U+320E..U+321E in NFKC mode.
			rb.combineHangul(s, i, k)
			return
		}
		ii := b[i]
		// We can only use combineForward as a filter if we later
		// get the info for the combined character. This is more
		// expensive than using the filter. Using combinesBackward()
		// is safe.
		if ii.combinesBackward() {
			cccB := b[k-1].ccc
			cccC := ii.ccc
			blocked := false // b[i] blocked by starter or greater or equal CCC?
			if cccB == 0 {
				s = k - 1
			} else {
				blocked = s != k-1 && cccB >= cccC
			}
			if !blocked {
				combined := combine(rb.runeAt(s), rb.runeAt(i))
				if combined != 0 {
					rb.assignRune(s, combined)
					continue
				}
			}
		}
		b[k] = b[i]
		k++
	}
	rb.nrune = k
}
|
||||
256
vendor/golang.org/x/text/unicode/norm/forminfo.go
generated
vendored
Normal file
256
vendor/golang.org/x/text/unicode/norm/forminfo.go
generated
vendored
Normal file
@@ -0,0 +1,256 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
// This file contains Form-specific logic and wrappers for data in tables.go.
|
||||
|
||||
// Rune info is stored in a separate trie per composing form. A composing form
|
||||
// and its corresponding decomposing form share the same trie. Each trie maps
|
||||
// a rune to a uint16. The values take two forms. For v >= 0x8000:
|
||||
// bits
|
||||
// 15: 1 (inverse of NFD_QD bit of qcInfo)
|
||||
// 13..7: qcInfo (see below). isYesD is always true (no decompostion).
|
||||
// 6..0: ccc (compressed CCC value).
|
||||
// For v < 0x8000, the respective rune has a decomposition and v is an index
|
||||
// into a byte array of UTF-8 decomposition sequences and additional info and
|
||||
// has the form:
|
||||
// <header> <decomp_byte>* [<tccc> [<lccc>]]
|
||||
// The header contains the number of bytes in the decomposition (excluding this
|
||||
// length byte). The two most significant bits of this length byte correspond
|
||||
// to bit 5 and 4 of qcInfo (see below). The byte sequence itself starts at v+1.
|
||||
// The byte sequence is followed by a trailing and leading CCC if the values
|
||||
// for these are not zero. The value of v determines which ccc are appended
|
||||
// to the sequences. For v < firstCCC, there are none, for v >= firstCCC,
|
||||
// the sequence is followed by a trailing ccc, and for v >= firstLeadingCC
|
||||
// there is an additional leading ccc. The value of tccc itself is the
|
||||
// trailing CCC shifted left 2 bits. The two least-significant bits of tccc
|
||||
// are the number of trailing non-starters.
|
||||
|
||||
const (
	qcInfoMask      = 0x3F // to clear all but the relevant bits in a qcInfo
	headerLenMask   = 0x3F // extract the length value from the header byte
	headerFlagsMask = 0xC0 // extract the qcInfo bits from the header byte
)

// Properties provides access to normalization properties of a rune.
type Properties struct {
	pos   uint8  // start position in reorderBuffer; used in composition.go
	size  uint8  // length of UTF-8 encoding of this rune
	ccc   uint8  // leading canonical combining class (ccc if not decomposition)
	tccc  uint8  // trailing canonical combining class (ccc if not decomposition)
	nLead uint8  // number of leading non-starters.
	flags qcInfo // quick check flags
	index uint16 // index into decomps; 0 if the rune has no decomposition
}
|
||||
|
||||
// functions dispatchable per form
type lookupFunc func(b input, i int) Properties

// formInfo holds Form-specific functions and tables.
type formInfo struct {
	form                     Form
	composing, compatibility bool       // form type
	info                     lookupFunc // trie lookup for this form's rune data
	nextMain                 iterFunc   // Iter implementation for this form
}
|
||||
|
||||
var formTable []*formInfo
|
||||
|
||||
func init() {
|
||||
formTable = make([]*formInfo, 4)
|
||||
|
||||
for i := range formTable {
|
||||
f := &formInfo{}
|
||||
formTable[i] = f
|
||||
f.form = Form(i)
|
||||
if Form(i) == NFKD || Form(i) == NFKC {
|
||||
f.compatibility = true
|
||||
f.info = lookupInfoNFKC
|
||||
} else {
|
||||
f.info = lookupInfoNFC
|
||||
}
|
||||
f.nextMain = nextDecomposed
|
||||
if Form(i) == NFC || Form(i) == NFKC {
|
||||
f.nextMain = nextComposed
|
||||
f.composing = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We do not distinguish between boundaries for NFC, NFD, etc. to avoid
|
||||
// unexpected behavior for the user. For example, in NFD, there is a boundary
|
||||
// after 'a'. However, 'a' might combine with modifiers, so from the application's
|
||||
// perspective it is not a good boundary. We will therefore always use the
|
||||
// boundaries for the combining variants.
|
||||
|
||||
// BoundaryBefore returns true if this rune starts a new segment and
|
||||
// cannot combine with any rune on the left.
|
||||
func (p Properties) BoundaryBefore() bool {
|
||||
if p.ccc == 0 && !p.combinesBackward() {
|
||||
return true
|
||||
}
|
||||
// We assume that the CCC of the first character in a decomposition
|
||||
// is always non-zero if different from info.ccc and that we can return
|
||||
// false at this point. This is verified by maketables.
|
||||
return false
|
||||
}
|
||||
|
||||
// BoundaryAfter returns true if runes cannot combine with or otherwise
|
||||
// interact with this or previous runes.
|
||||
func (p Properties) BoundaryAfter() bool {
|
||||
// TODO: loosen these conditions.
|
||||
return p.isInert()
|
||||
}
|
||||
|
||||
// We pack quick check data in 4 bits:
//   5:    Combines forward  (0 == false, 1 == true)
//   4..3: NFC_QC Yes(00), No (10), or Maybe (11)
//   2:    NFD_QC Yes (0) or No (1). No also means there is a decomposition.
//   1..0: Number of trailing non-starters.
//
// When all 4 bits are zero, the character is inert, meaning it is never
// influenced by normalization.
type qcInfo uint8

func (p Properties) isYesC() bool { return p.flags&0x10 == 0 } // NFC_QC == Yes
func (p Properties) isYesD() bool { return p.flags&0x4 == 0 }  // NFD_QC == Yes

func (p Properties) combinesForward() bool  { return p.flags&0x20 != 0 }
func (p Properties) combinesBackward() bool { return p.flags&0x8 != 0 } // == isMaybe
func (p Properties) hasDecomposition() bool { return p.flags&0x4 != 0 } // == isNoD

// isInert reports whether the rune is never affected by normalization.
func (p Properties) isInert() bool {
	return p.flags&qcInfoMask == 0 && p.ccc == 0
}

// multiSegment reports whether the rune's decomposition spans multiple
// segments (its index falls in the multi-segment range of decomps).
func (p Properties) multiSegment() bool {
	return p.index >= firstMulti && p.index < endMulti
}

// nLeadingNonStarters returns the number of leading non-starters in the
// rune's decomposition.
func (p Properties) nLeadingNonStarters() uint8 {
	return p.nLead
}

// nTrailingNonStarters returns the number of trailing non-starters, stored
// in the two low bits of the quick-check flags.
func (p Properties) nTrailingNonStarters() uint8 {
	return uint8(p.flags & 0x03)
}
|
||||
|
||||
// Decomposition returns the decomposition for the underlying rune
// or nil if there is none.
func (p Properties) Decomposition() []byte {
	// TODO: create the decomposition for Hangul?
	if p.index == 0 {
		return nil
	}
	i := p.index
	// The header byte at index i encodes the byte length of the
	// decomposition in its low bits; the bytes start at i+1.
	n := decomps[i] & headerLenMask
	i++
	return decomps[i : i+uint16(n)]
}
|
||||
|
||||
// Size returns the length of UTF-8 encoding of the rune.
func (p Properties) Size() int {
	return int(p.size)
}

// CCC returns the canonical combining class of the underlying rune.
func (p Properties) CCC() uint8 {
	if p.index >= firstCCCZeroExcept {
		// Exceptional entries: the rune itself has CCC 0 even though
		// its stored (decomposition) ccc index is non-zero.
		return 0
	}
	// p.ccc is a compressed index into the ccc table.
	return ccc[p.ccc]
}

// LeadCCC returns the CCC of the first rune in the decomposition.
// If there is no decomposition, LeadCCC equals CCC.
func (p Properties) LeadCCC() uint8 {
	return ccc[p.ccc]
}

// TrailCCC returns the CCC of the last rune in the decomposition.
// If there is no decomposition, TrailCCC equals CCC.
func (p Properties) TrailCCC() uint8 {
	return ccc[p.tccc]
}
|
||||
|
||||
// Recomposition
// We use 32-bit keys instead of 64-bit for the two codepoint keys.
// This clips off the bits of three entries, but we know this will not
// result in a collision. In the unlikely event that changes to
// UnicodeData.txt introduce collisions, the compiler will catch it.
// Note that the recomposition map for NFC and NFKC are identical.

// combine returns the combined rune or 0 if it doesn't exist.
func combine(a, b rune) rune {
	// Pack both code points (truncated to 16 bits each) into one key.
	key := uint32(uint16(a))<<16 + uint32(uint16(b))
	return recompMap[key]
}

// lookupInfoNFC returns the properties of the rune at position i using the
// canonical (NFC/NFD) trie.
func lookupInfoNFC(b input, i int) Properties {
	v, sz := b.charinfoNFC(i)
	return compInfo(v, sz)
}

// lookupInfoNFKC returns the properties of the rune at position i using the
// compatibility (NFKC/NFKD) trie.
func lookupInfoNFKC(b input, i int) Properties {
	v, sz := b.charinfoNFKC(i)
	return compInfo(v, sz)
}
|
||||
|
||||
// Properties returns properties for the first rune in s.
|
||||
func (f Form) Properties(s []byte) Properties {
|
||||
if f == NFC || f == NFD {
|
||||
return compInfo(nfcData.lookup(s))
|
||||
}
|
||||
return compInfo(nfkcData.lookup(s))
|
||||
}
|
||||
|
||||
// PropertiesString returns properties for the first rune in s.
|
||||
func (f Form) PropertiesString(s string) Properties {
|
||||
if f == NFC || f == NFD {
|
||||
return compInfo(nfcData.lookupString(s))
|
||||
}
|
||||
return compInfo(nfkcData.lookupString(s))
|
||||
}
|
||||
|
||||
// compInfo converts the information contained in v and sz
// to a Properties. See the comment at the top of the file
// for more information on the format.
func compInfo(v uint16, sz int) Properties {
	if v == 0 {
		// Inert rune: no flags, zero ccc, no decomposition.
		return Properties{size: uint8(sz)}
	} else if v >= 0x8000 {
		// No decomposition: ccc and qcInfo are packed directly in v.
		p := Properties{
			size:  uint8(sz),
			ccc:   uint8(v),
			tccc:  uint8(v),
			flags: qcInfo(v >> 8),
		}
		if p.ccc > 0 || p.combinesBackward() {
			// For non-starters the trailing count (low two flag
			// bits) equals the leading count.
			p.nLead = uint8(p.flags & 0x3)
		}
		return p
	}
	// has decomposition
	h := decomps[v]
	f := (qcInfo(h&headerFlagsMask) >> 2) | 0x4
	p := Properties{size: uint8(sz), flags: f, index: v}
	if v >= firstCCC {
		// A trailing-CCC byte follows the decomposition bytes.
		v += uint16(h&headerLenMask) + 1
		c := decomps[v]
		p.tccc = c >> 2
		p.flags |= qcInfo(c & 0x3)
		if v >= firstLeadingCCC {
			p.nLead = c & 0x3
			if v >= firstStarterWithNLead {
				// We were tricked. Remove the decomposition.
				p.flags &= 0x03
				p.index = 0
				return p
			}
			// A leading-CCC byte follows the trailing-CCC byte.
			p.ccc = decomps[v+1]
		}
	}
	return p
}
|
||||
105
vendor/golang.org/x/text/unicode/norm/input.go
generated
vendored
Normal file
105
vendor/golang.org/x/text/unicode/norm/input.go
generated
vendored
Normal file
@@ -0,0 +1,105 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
import "unicode/utf8"
|
||||
|
||||
// input abstracts over string and []byte sources so the normalization
// machinery can operate on either without duplication. Exactly one of the
// two fields is in use at a time.
type input struct {
	str   string
	bytes []byte
}
|
||||
|
||||
// inputBytes returns an input reading from the given byte slice.
func inputBytes(str []byte) input {
	return input{bytes: str}
}

// inputString returns an input reading from the given string.
func inputString(str string) input {
	return input{str: str}
}

// setBytes switches in to byte-slice mode, releasing any held string.
func (in *input) setBytes(str []byte) {
	in.str = ""
	in.bytes = str
}

// setString switches in to string mode, releasing any held byte slice.
func (in *input) setString(str string) {
	in.str = str
	in.bytes = nil
}
|
||||
|
||||
func (in *input) _byte(p int) byte {
|
||||
if in.bytes == nil {
|
||||
return in.str[p]
|
||||
}
|
||||
return in.bytes[p]
|
||||
}
|
||||
|
||||
// skipASCII returns the index of the first non-ASCII byte at or after p,
// or max if all bytes up to max are ASCII.
func (in *input) skipASCII(p, max int) int {
	if in.bytes == nil {
		for ; p < max && in.str[p] < utf8.RuneSelf; p++ {
		}
	} else {
		for ; p < max && in.bytes[p] < utf8.RuneSelf; p++ {
		}
	}
	return p
}

// skipContinuationBytes returns the index of the first byte at or after p
// that starts a new UTF-8 rune (is not a continuation byte).
func (in *input) skipContinuationBytes(p int) int {
	if in.bytes == nil {
		for ; p < len(in.str) && !utf8.RuneStart(in.str[p]); p++ {
		}
	} else {
		for ; p < len(in.bytes) && !utf8.RuneStart(in.bytes[p]); p++ {
		}
	}
	return p
}
|
||||
|
||||
func (in *input) appendSlice(buf []byte, b, e int) []byte {
|
||||
if in.bytes != nil {
|
||||
return append(buf, in.bytes[b:e]...)
|
||||
}
|
||||
for i := b; i < e; i++ {
|
||||
buf = append(buf, in.str[i])
|
||||
}
|
||||
return buf
|
||||
}
|
||||
|
||||
func (in *input) copySlice(buf []byte, b, e int) int {
|
||||
if in.bytes == nil {
|
||||
return copy(buf, in.str[b:e])
|
||||
}
|
||||
return copy(buf, in.bytes[b:e])
|
||||
}
|
||||
|
||||
// charinfoNFC looks up the NFC/NFD trie value and rune size at position p.
func (in *input) charinfoNFC(p int) (uint16, int) {
	if in.bytes == nil {
		return nfcData.lookupString(in.str[p:])
	}
	return nfcData.lookup(in.bytes[p:])
}

// charinfoNFKC looks up the NFKC/NFKD trie value and rune size at position p.
func (in *input) charinfoNFKC(p int) (uint16, int) {
	if in.bytes == nil {
		return nfkcData.lookupString(in.str[p:])
	}
	return nfkcData.lookup(in.bytes[p:])
}
|
||||
|
||||
// hangul returns the precomposed Hangul syllable starting at position p,
// or 0 if the input at p does not start with one.
func (in *input) hangul(p int) (r rune) {
	if in.bytes == nil {
		if !isHangulString(in.str[p:]) {
			return 0
		}
		r, _ = utf8.DecodeRuneInString(in.str[p:])
	} else {
		if !isHangul(in.bytes[p:]) {
			return 0
		}
		r, _ = utf8.DecodeRune(in.bytes[p:])
	}
	return r
}
|
||||
450
vendor/golang.org/x/text/unicode/norm/iter.go
generated
vendored
Normal file
450
vendor/golang.org/x/text/unicode/norm/iter.go
generated
vendored
Normal file
@@ -0,0 +1,450 @@
|
||||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package norm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// MaxSegmentSize is the maximum size of a byte buffer needed to consider any
// sequence of starter and non-starter runes for the purpose of normalization.
// It matches the size of the internal reorder buffer.
const MaxSegmentSize = maxByteBufferSize
|
||||
|
||||
// An Iter iterates over a string or byte slice, while normalizing it
// to a given Form.
type Iter struct {
	rb     reorderBuffer           // working buffer for the current segment
	buf    [maxByteBufferSize]byte // output buffer for string input and decompositions
	info   Properties              // first character saved from previous iteration
	next   iterFunc                // implementation of next depends on form
	asciiF iterFunc                // ASCII fast-path variant for the input type

	p        int    // current position in input source
	multiSeg []byte // remainder of multi-segment decomposition
}

// iterFunc is the per-state implementation behind Iter.Next.
type iterFunc func(*Iter) []byte
|
||||
|
||||
// Init initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) Init(f Form, src []byte) {
	i.p = 0
	if len(src) == 0 {
		i.setDone()
		i.rb.nsrc = 0
		return
	}
	i.multiSeg = nil
	i.rb.init(f, src)
	i.next = i.rb.f.nextMain
	i.asciiF = nextASCIIBytes
	// Pre-load the properties of the first rune.
	i.info = i.rb.f.info(i.rb.src, i.p)
}

// InitString initializes i to iterate over src after normalizing it to Form f.
func (i *Iter) InitString(f Form, src string) {
	i.p = 0
	if len(src) == 0 {
		i.setDone()
		i.rb.nsrc = 0
		return
	}
	i.multiSeg = nil
	i.rb.initString(f, src)
	i.next = i.rb.f.nextMain
	i.asciiF = nextASCIIString
	// Pre-load the properties of the first rune.
	i.info = i.rb.f.info(i.rb.src, i.p)
}
|
||||
|
||||
// Seek sets the segment to be returned by the next call to Next to start
// at position p. It is the responsibility of the caller to set p to the
// start of a UTF8 rune. The whence values follow the io.Seeker convention
// (0 = start, 1 = current, 2 = end).
func (i *Iter) Seek(offset int64, whence int) (int64, error) {
	var abs int64
	switch whence {
	case 0:
		abs = offset
	case 1:
		abs = int64(i.p) + offset
	case 2:
		abs = int64(i.rb.nsrc) + offset
	default:
		return 0, fmt.Errorf("norm: invalid whence")
	}
	if abs < 0 {
		return 0, fmt.Errorf("norm: negative position")
	}
	if int(abs) >= i.rb.nsrc {
		// Seeking at or past the end exhausts the iterator.
		i.setDone()
		return int64(i.p), nil
	}
	i.p = int(abs)
	// Reset any in-flight multi-segment state and re-prime the iterator.
	i.multiSeg = nil
	i.next = i.rb.f.nextMain
	i.info = i.rb.f.info(i.rb.src, i.p)
	return abs, nil
}
|
||||
|
||||
// returnSlice returns a slice of the underlying input type as a byte slice.
// If the underlying is of type []byte, it will simply return a slice.
// If the underlying is of type string, it will copy the slice to the buffer
// and return that.
func (i *Iter) returnSlice(a, b int) []byte {
	if i.rb.src.bytes == nil {
		return i.buf[:copy(i.buf[:], i.rb.src.str[a:b])]
	}
	return i.rb.src.bytes[a:b]
}

// Pos returns the byte position at which the next call to Next will commence processing.
func (i *Iter) Pos() int {
	return i.p
}
|
||||
|
||||
// setDone marks the iterator as exhausted: Next will return nil and Done true.
func (i *Iter) setDone() {
	i.next = nextDone
	i.p = i.rb.nsrc
}

// Done returns true if there is no more input to process.
func (i *Iter) Done() bool {
	return i.p >= i.rb.nsrc
}

// Next returns f(i.input[i.Pos():n]), where n is a boundary of i.input.
// For any input a and b for which f(a) == f(b), subsequent calls
// to Next will return the same segments.
// Modifying runes are grouped together with the preceding starter, if such a starter exists.
// Although not guaranteed, n will typically be the smallest possible n.
func (i *Iter) Next() []byte {
	return i.next(i)
}
|
||||
|
||||
// nextASCIIBytes is the fast path for runs of ASCII in []byte input:
// each ASCII byte is its own normalized segment and can be returned
// without copying.
func nextASCIIBytes(i *Iter) []byte {
	p := i.p + 1
	if p >= i.rb.nsrc {
		// Last byte of input.
		i.setDone()
		return i.rb.src.bytes[i.p:p]
	}
	if i.rb.src.bytes[p] < utf8.RuneSelf {
		p0 := i.p
		i.p = p
		return i.rb.src.bytes[p0:p]
	}
	// Non-ASCII follows: fall back to the form's main iterator.
	i.info = i.rb.f.info(i.rb.src, i.p)
	i.next = i.rb.f.nextMain
	return i.next(i)
}
|
||||
|
||||
// nextASCIIString is the fast path for runs of ASCII in string input.
// Unlike the []byte variant, the byte must be copied into i.buf because a
// string cannot be sliced into a []byte without copying.
func nextASCIIString(i *Iter) []byte {
	p := i.p + 1
	if p >= i.rb.nsrc {
		// Last byte of input.
		i.buf[0] = i.rb.src.str[i.p]
		i.setDone()
		return i.buf[:1]
	}
	if i.rb.src.str[p] < utf8.RuneSelf {
		i.buf[0] = i.rb.src.str[i.p]
		i.p = p
		return i.buf[:1]
	}
	// Non-ASCII follows: fall back to the form's main iterator.
	i.info = i.rb.f.info(i.rb.src, i.p)
	i.next = i.rb.f.nextMain
	return i.next(i)
}
|
||||
|
||||
// nextHangul is the fast path for runs of precomposed Hangul syllables:
// each syllable decomposes algorithmically into its own segment.
func nextHangul(i *Iter) []byte {
	p := i.p
	next := p + hangulUTF8Size
	if next >= i.rb.nsrc {
		// The syllable at p is the last rune of the input.
		i.setDone()
	} else if i.rb.src.hangul(next) == 0 {
		// The following rune is not Hangul: fall back to the form's
		// main iterator.
		i.info = i.rb.f.info(i.rb.src, i.p)
		i.next = i.rb.f.nextMain
		return i.next(i)
	}
	i.p = next
	// Decompose the syllable at p into i.buf and return it.
	return i.buf[:decomposeHangul(i.buf[:], i.rb.src.hangul(p))]
}

// nextDone is installed by setDone and reports the end of iteration.
func nextDone(i *Iter) []byte {
	return nil
}
|
||||
|
||||
// nextMulti is used for iterating over multi-segment decompositions
// for decomposing normal forms.
func nextMulti(i *Iter) []byte {
	j := 0
	d := i.multiSeg
	// skip first rune
	for j = 1; j < len(d) && !utf8.RuneStart(d[j]); j++ {
	}
	for j < len(d) {
		info := i.rb.f.info(input{bytes: d}, j)
		if info.BoundaryBefore() {
			// Return the leading segment and keep the remainder for
			// the next call.
			i.multiSeg = d[j:]
			return d[:j]
		}
		j += int(info.size)
	}
	// treat last segment as normal decomposition
	i.next = i.rb.f.nextMain
	return i.next(i)
}
|
||||
|
||||
// nextMultiNorm is used for iterating over multi-segment decompositions
// for composing normal forms. It feeds the decomposition bytes in
// i.multiSeg through the reorder buffer, flushing a composed segment
// each time a new boundary is reached.
func nextMultiNorm(i *Iter) []byte {
	j := 0
	d := i.multiSeg
	for j < len(d) {
		info := i.rb.f.info(input{bytes: d}, j)
		if info.BoundaryBefore() {
			// A new segment starts here: compose and flush what has
			// been inserted so far, then seed the buffer with this
			// rune and save the remainder for the next call.
			i.rb.compose()
			seg := i.buf[:i.rb.flushCopy(i.buf[:])]
			i.rb.ss.first(info)
			i.rb.insertUnsafe(input{bytes: d}, j, info)
			i.multiSeg = d[j+int(info.size):]
			return seg
		}
		i.rb.ss.next(info)
		i.rb.insertUnsafe(input{bytes: d}, j, info)
		j += int(info.size)
	}
	// Decomposition exhausted: resume normal composed iteration with
	// whatever is still pending in the reorder buffer.
	i.multiSeg = nil
	i.next = nextComposed
	return doNormComposed(i)
}
|
||||
|
||||
// nextDecomposed is the implementation of Next for forms NFD and NFKD.
//
// It copies/decomposes runes into i.buf until it reaches a segment
// boundary (a starter) or the stream-safe limit, returning either a
// sub-slice of the source (when no decomposition was needed) or i.buf.
// outp is the current write offset into i.buf; inCopyStart/outCopyStart
// track a pending range of source bytes that has not yet been copied
// into the buffer.
func nextDecomposed(i *Iter) (next []byte) {
	outp := 0
	inCopyStart, outCopyStart := i.p, 0
	ss := mkStreamSafe(i.info)
	for {
		if sz := int(i.info.size); sz <= 1 {
			p := i.p
			i.p++ // ASCII or illegal byte. Either way, advance by 1.
			if i.p >= i.rb.nsrc {
				i.setDone()
				return i.returnSlice(p, i.p)
			} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
				// Next byte is ASCII: switch to the fast path.
				i.next = i.asciiF
				return i.returnSlice(p, i.p)
			}
			outp++
		} else if d := i.info.Decomposition(); d != nil {
			// Note: If leading CCC != 0, then len(d) == 2 and last is also non-zero.
			// Case 1: there is a leftover to copy. In this case the decomposition
			// must begin with a modifier and should always be appended.
			// Case 2: no leftover. Simply return d if followed by a ccc == 0 value.
			p := outp + len(d)
			if outp > 0 {
				// Flush the pending source range into the buffer before
				// appending the decomposition.
				i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
				if p > len(i.buf) {
					return i.buf[:outp]
				}
			} else if i.info.multiSegment() {
				// outp must be 0 as multi-segment decompositions always
				// start a new segment.
				if i.multiSeg == nil {
					i.multiSeg = d
					i.next = nextMulti
					return nextMulti(i)
				}
				// We are in the last segment. Treat as normal decomposition.
				d = i.multiSeg
				i.multiSeg = nil
				p = len(d)
			}
			prevCC := i.info.tccc
			if i.p += sz; i.p >= i.rb.nsrc {
				i.setDone()
				i.info = Properties{} // Force BoundaryBefore to succeed.
			} else {
				i.info = i.rb.f.info(i.rb.src, i.p)
			}
			switch ss.next(i.info) {
			case ssOverflow:
				// Too many combining characters: insert a CGJ next call.
				i.next = nextCGJDecompose
				fallthrough
			case ssStarter:
				if outp > 0 {
					copy(i.buf[outp:], d)
					return i.buf[:p]
				}
				return d
			}
			copy(i.buf[outp:], d)
			outp = p
			inCopyStart, outCopyStart = i.p, outp
			if i.info.ccc < prevCC {
				// Out-of-order combining classes: reorder via the buffer.
				goto doNorm
			}
			continue
		} else if r := i.rb.src.hangul(i.p); r != 0 {
			outp = decomposeHangul(i.buf[:], r)
			i.p += hangulUTF8Size
			inCopyStart, outCopyStart = i.p, outp
			if i.p >= i.rb.nsrc {
				i.setDone()
				break
			} else if i.rb.src.hangul(i.p) != 0 {
				// More Hangul follows: switch to the Hangul state.
				i.next = nextHangul
				return i.buf[:outp]
			}
		} else {
			// Rune needs no decomposition: count it as part of the
			// current (possibly zero-copy) run.
			p := outp + sz
			if p > len(i.buf) {
				break
			}
			outp = p
			i.p += sz
		}
		if i.p >= i.rb.nsrc {
			i.setDone()
			break
		}
		prevCC := i.info.tccc
		i.info = i.rb.f.info(i.rb.src, i.p)
		if v := ss.next(i.info); v == ssStarter {
			break
		} else if v == ssOverflow {
			i.next = nextCGJDecompose
			break
		}
		if i.info.ccc < prevCC {
			goto doNorm
		}
	}
	if outCopyStart == 0 {
		// Nothing was written to the buffer: return the source directly.
		return i.returnSlice(inCopyStart, i.p)
	} else if inCopyStart < i.p {
		i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
	}
	return i.buf[:outp]
doNorm:
	// Insert what we have decomposed so far in the reorderBuffer.
	// As we will only reorder, there will always be enough room.
	i.rb.src.copySlice(i.buf[outCopyStart:], inCopyStart, i.p)
	i.rb.insertDecomposed(i.buf[0:outp])
	return doNormDecomposed(i)
}
|
||||
|
||||
// doNormDecomposed feeds the current rune and any following non-starter
// runes (ccc != 0) through the reorder buffer, then flushes the
// canonically ordered result into i.buf and returns it.
func doNormDecomposed(i *Iter) []byte {
	for {
		if s := i.rb.ss.next(i.info); s == ssOverflow {
			// Stream-safe limit reached: insert a CGJ on the next call.
			i.next = nextCGJDecompose
			break
		}
		i.rb.insertUnsafe(i.rb.src, i.p, i.info)
		if i.p += int(i.info.size); i.p >= i.rb.nsrc {
			i.setDone()
			break
		}
		i.info = i.rb.f.info(i.rb.src, i.p)
		if i.info.ccc == 0 {
			// A starter ends the current segment.
			break
		}
	}
	// new segment or too many combining characters: exit normalization
	return i.buf[:i.rb.flushCopy(i.buf[:])]
}
|
||||
|
||||
func nextCGJDecompose(i *Iter) []byte {
|
||||
i.rb.ss = 0
|
||||
i.rb.insertCGJ()
|
||||
i.next = nextDecomposed
|
||||
buf := doNormDecomposed(i)
|
||||
return buf
|
||||
}
|
||||
|
||||
// nextComposed is the implementation of Next for forms NFC and NFKC.
//
// Fast path: while the input is already composed (isYesC) and in
// canonical order, track it with outp/startp and return a sub-slice of
// the source. On any violation it falls to doNorm and recomposes the
// segment through the reorder buffer.
func nextComposed(i *Iter) []byte {
	outp, startp := 0, i.p
	var prevCC uint8
	ss := mkStreamSafe(i.info)
	for {
		if !i.info.isYesC() {
			goto doNorm
		}
		prevCC = i.info.tccc
		sz := int(i.info.size)
		if sz == 0 {
			sz = 1 // illegal rune: copy byte-by-byte
		}
		p := outp + sz
		if p > len(i.buf) {
			// Segment would no longer fit in the buffer: cut it here.
			break
		}
		outp = p
		i.p += sz
		if i.p >= i.rb.nsrc {
			i.setDone()
			break
		} else if i.rb.src._byte(i.p) < utf8.RuneSelf {
			// ASCII ahead: switch to the fast path.
			i.next = i.asciiF
			break
		}
		i.info = i.rb.f.info(i.rb.src, i.p)
		if v := ss.next(i.info); v == ssStarter {
			break
		} else if v == ssOverflow {
			i.next = nextCGJCompose
			break
		}
		if i.info.ccc < prevCC {
			// Combining classes out of order: needs normalization.
			goto doNorm
		}
	}
	return i.returnSlice(startp, i.p)
doNorm:
	// Rewind to the start of the segment and recompose it properly.
	i.p = startp
	i.info = i.rb.f.info(i.rb.src, i.p)
	if i.info.multiSegment() {
		// Multi-segment decomposition: seed the buffer with its first
		// rune and continue via nextMultiNorm.
		d := i.info.Decomposition()
		info := i.rb.f.info(input{bytes: d}, 0)
		i.rb.insertUnsafe(input{bytes: d}, 0, info)
		i.multiSeg = d[int(info.size):]
		i.next = nextMultiNorm
		return nextMultiNorm(i)
	}
	i.rb.ss.first(i.info)
	i.rb.insertUnsafe(i.rb.src, i.p, i.info)
	return doNormComposed(i)
}
|
||||
|
||||
func doNormComposed(i *Iter) []byte {
|
||||
// First rune should already be inserted.
|
||||
for {
|
||||
if i.p += int(i.info.size); i.p >= i.rb.nsrc {
|
||||
i.setDone()
|
||||
break
|
||||
}
|
||||
i.info = i.rb.f.info(i.rb.src, i.p)
|
||||
if s := i.rb.ss.next(i.info); s == ssStarter {
|
||||
break
|
||||
} else if s == ssOverflow {
|
||||
i.next = nextCGJCompose
|
||||
break
|
||||
}
|
||||
i.rb.insertUnsafe(i.rb.src, i.p, i.info)
|
||||
}
|
||||
i.rb.compose()
|
||||
seg := i.buf[:i.rb.flushCopy(i.buf[:])]
|
||||
return seg
|
||||
}
|
||||
|
||||
// nextCGJCompose inserts a CGJ to break an overlong run of combining
// characters, seeds the reorder buffer with the current rune, and
// resumes composed iteration.
func nextCGJCompose(i *Iter) []byte {
	i.rb.ss = 0 // instead of first
	i.rb.insertCGJ()
	i.next = nextComposed
	// Note that we treat any rune with nLeadingNonStarters > 0 as a non-starter,
	// even if they are not. This is particularly dubious for U+FF9E and U+FF9A.
	// If we ever change that, insert a check here.
	i.rb.ss.first(i.info)
	i.rb.insertUnsafe(i.rb.src, i.p, i.info)
	return doNormComposed(i)
}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user